repo: linux-arm.git  branch: drm-dwhdmi-devel
path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_lpat.c | 6
-rw-r--r--  drivers/acpi/acpi_lpss.c | 3
-rw-r--r--  drivers/acpi/acpi_processor.c | 2
-rw-r--r--  drivers/acpi/acpi_video.c | 2
-rw-r--r--  drivers/acpi/acpica/Makefile | 1
-rw-r--r--  drivers/acpi/acpica/acapps.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 13
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 7
-rw-r--r--  drivers/acpi/acpica/acobject.h | 15
-rw-r--r--  drivers/acpi/acpica/actables.h | 5
-rw-r--r--  drivers/acpi/acpica/acutils.h | 9
-rw-r--r--  drivers/acpi/acpica/dbdisply.c | 37
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 398
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 9
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 496
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 30
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 8
-rw-r--r--  drivers/acpi/acpica/excreate.c | 62
-rw-r--r--  drivers/acpi/acpica/exdump.c | 34
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 9
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 3
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 9
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 28
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 21
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 14
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 9
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 10
-rw-r--r--  drivers/acpi/acpica/psloop.c | 14
-rw-r--r--  drivers/acpi/acpica/psobject.c | 26
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 7
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 230
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 161
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 39
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 4
-rw-r--r--  drivers/acpi/acpica/utmath.c | 222
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 10
-rw-r--r--  drivers/acpi/acpica/utobject.c | 5
-rw-r--r--  drivers/acpi/acpica/utprint.c | 8
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 7
-rw-r--r--  drivers/acpi/acpica/utstate.c | 2
-rw-r--r--  drivers/acpi/acpica/utstrtoul64.c | 9
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 9
-rw-r--r--  drivers/acpi/apei/apei-internal.h | 5
-rw-r--r--  drivers/acpi/apei/einj.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 10
-rw-r--r--  drivers/acpi/apei/hest.c | 13
-rw-r--r--  drivers/acpi/arm64/iort.c | 197
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/blacklist.c | 87
-rw-r--r--  drivers/acpi/bus.c | 7
-rw-r--r--  drivers/acpi/device_pm.c | 175
-rw-r--r--  drivers/acpi/dock.c | 2
-rw-r--r--  drivers/acpi/ec.c | 50
-rw-r--r--  drivers/acpi/internal.h | 6
-rw-r--r--  drivers/acpi/nfit/Kconfig | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 52
-rw-r--r--  drivers/acpi/osi.c | 39
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/pci_slot.c | 2
-rw-r--r--  drivers/acpi/pmic/intel_pmic_xpower.c | 21
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 25
-rw-r--r--  drivers/acpi/processor_pdc.c | 2
-rw-r--r--  drivers/acpi/property.c | 239
-rw-r--r--  drivers/acpi/resource.c | 82
-rw-r--r--  drivers/acpi/sbs.c | 27
-rw-r--r--  drivers/acpi/scan.c | 107
-rw-r--r--  drivers/acpi/sleep.c | 206
-rw-r--r--  drivers/acpi/spcr.c | 36
-rw-r--r--  drivers/acpi/sysfs.c | 91
-rw-r--r--  drivers/acpi/tables.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 2
-rw-r--r--  drivers/acpi/utils.c | 36
-rw-r--r--  drivers/acpi/video_detect.c | 14
-rw-r--r--  drivers/acpi/x86/apple.c | 141
-rw-r--r--  drivers/android/Kconfig | 14
-rw-r--r--  drivers/android/Makefile | 3
-rw-r--r--  drivers/android/binder.c | 3762
-rw-r--r--  drivers/android/binder_alloc.c | 1009
-rw-r--r--  drivers/android/binder_alloc.h | 187
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 310
-rw-r--r--  drivers/android/binder_trace.h | 96
-rw-r--r--  drivers/ata/Kconfig | 10
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 9
-rw-r--r--  drivers/ata/ahci_da850.c | 8
-rw-r--r--  drivers/ata/ahci_mtk.c | 196
-rw-r--r--  drivers/ata/ahci_platform.c | 1
-rw-r--r--  drivers/ata/libahci_platform.c | 34
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/ata/libata-eh.c | 8
-rw-r--r--  drivers/ata/libata-zpodd.c | 4
-rw-r--r--  drivers/ata/pata_amd.c | 1
-rw-r--r--  drivers/ata/pata_cs5536.c | 1
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 10
-rw-r--r--  drivers/ata/sata_gemini.c | 67
-rw-r--r--  drivers/ata/sata_svw.c | 2
-rw-r--r--  drivers/atm/adummy.c | 4
-rw-r--r--  drivers/atm/ambassador.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 2
-rw-r--r--  drivers/atm/eni.c | 2
-rw-r--r--  drivers/atm/firestream.c | 2
-rw-r--r--  drivers/atm/fore200e.c | 2
-rw-r--r--  drivers/atm/he.c | 4
-rw-r--r--  drivers/atm/horizon.c | 2
-rw-r--r--  drivers/atm/idt77252.c | 4
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/atm/lanai.c | 2
-rw-r--r--  drivers/atm/nicstar.c | 4
-rw-r--r--  drivers/atm/solos-pci.c | 8
-rw-r--r--  drivers/atm/zatm.c | 2
-rw-r--r--  drivers/auxdisplay/panel.c | 6
-rw-r--r--  drivers/base/Kconfig | 5
-rw-r--r--  drivers/base/arch_topology.c | 86
-rw-r--r--  drivers/base/base.h | 5
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/base/core.c | 141
-rw-r--r--  drivers/base/cpu.c | 4
-rw-r--r--  drivers/base/dd.c | 32
-rw-r--r--  drivers/base/dma-coherent.c | 85
-rw-r--r--  drivers/base/dma-mapping.c | 7
-rw-r--r--  drivers/base/firmware_class.c | 112
-rw-r--r--  drivers/base/memory.c | 30
-rw-r--r--  drivers/base/node.c | 22
-rw-r--r--  drivers/base/power/domain.c | 251
-rw-r--r--  drivers/base/power/main.c | 103
-rw-r--r--  drivers/base/power/opp/of.c | 37
-rw-r--r--  drivers/base/power/wakeup.c | 10
-rw-r--r--  drivers/base/property.c | 131
-rw-r--r--  drivers/base/topology.c | 2
-rw-r--r--  drivers/bcma/Kconfig | 9
-rw-r--r--  drivers/bcma/driver_gpio.c | 1
-rw-r--r--  drivers/block/DAC960.c | 12
-rw-r--r--  drivers/block/Kconfig | 30
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/brd.c | 11
-rw-r--r--  drivers/block/cciss.c | 5415
-rw-r--r--  drivers/block/cciss.h | 433
-rw-r--r--  drivers/block/cciss_cmd.h | 269
-rw-r--r--  drivers/block/cciss_scsi.c | 1653
-rw-r--r--  drivers/block/cciss_scsi.h | 79
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_int.h | 31
-rw-r--r--  drivers/block/drbd/drbd_main.c | 113
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 60
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 10
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 60
-rw-r--r--  drivers/block/drbd/drbd_req.c | 86
-rw-r--r--  drivers/block/drbd/drbd_req.h | 6
-rw-r--r--  drivers/block/drbd/drbd_state.c | 48
-rw-r--r--  drivers/block/drbd/drbd_state.h | 8
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 48
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 249
-rw-r--r--  drivers/block/loop.h | 9
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/block/null_blk.c | 1311
-rw-r--r--  drivers/block/pktcdvd.c | 11
-rw-r--r--  drivers/block/ps3vram.c | 10
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/rsxx/dev.c | 6
-rw-r--r--  drivers/block/skd_main.c | 3164
-rw-r--r--  drivers/block/skd_s1120.h | 38
-rw-r--r--  drivers/block/virtio_blk.c | 18
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 9
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 13
-rw-r--r--  drivers/block/xen-blkfront.c | 8
-rw-r--r--  drivers/block/zram/Kconfig | 12
-rw-r--r--  drivers/block/zram/zram_drv.c | 558
-rw-r--r--  drivers/block/zram/zram_drv.h | 11
-rw-r--r--  drivers/bluetooth/Kconfig | 2
-rw-r--r--  drivers/bluetooth/ath3k.c | 3
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 58
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 8
-rw-r--r--  drivers/bluetooth/btbcm.c | 69
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 6
-rw-r--r--  drivers/bluetooth/btqca.c | 2
-rw-r--r--  drivers/bluetooth/btrtl.c | 2
-rw-r--r--  drivers/bluetooth/btsdio.c | 3
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 8
-rw-r--r--  drivers/bluetooth/btusb.c | 68
-rw-r--r--  drivers/bluetooth/btwilink.c | 8
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 133
-rw-r--r--  drivers/bluetooth/hci_h4.c | 2
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 3
-rw-r--r--  drivers/bluetooth/hci_ll.c | 11
-rw-r--r--  drivers/bluetooth/hci_nokia.c | 10
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 13
-rw-r--r--  drivers/bluetooth/hci_uart.h | 1
-rw-r--r--  drivers/bus/Kconfig | 2
-rw-r--r--  drivers/bus/arm-cci.c | 12
-rw-r--r--  drivers/bus/imx-weim.c | 8
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 9
-rw-r--r--  drivers/bus/sunxi-rsb.c | 22
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/agp/ali-agp.c | 2
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 4
-rw-r--r--  drivers/char/agp/amd64-agp.c | 2
-rw-r--r--  drivers/char/agp/ati-agp.c | 2
-rw-r--r--  drivers/char/agp/efficeon-agp.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 2
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 2
-rw-r--r--  drivers/char/agp/sis-agp.c | 2
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 2
-rw-r--r--  drivers/char/applicom.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 20
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/core.c | 42
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 331
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 12
-rw-r--r--  drivers/char/mwave/smapi.c | 48
-rw-r--r--  drivers/char/ppdev.c | 3
-rw-r--r--  drivers/char/sonypi.c | 2
-rw-r--r--  drivers/char/tlclk.c | 2
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 11
-rw-r--r--  drivers/char/virtio_console.c | 7
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 39
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 13
-rw-r--r--  drivers/clk/Kconfig | 17
-rw-r--r--  drivers/clk/Makefile | 3
-rw-r--r--  drivers/clk/at91/Makefile | 1
-rw-r--r--  drivers/clk/at91/clk-audio-pll.c | 536
-rw-r--r--  drivers/clk/at91/clk-generated.c | 101
-rw-r--r--  drivers/clk/axs10x/Makefile | 1
-rw-r--r--  drivers/clk/axs10x/pll_clock.c | 346
-rw-r--r--  drivers/clk/berlin/bg2.c | 3
-rw-r--r--  drivers/clk/berlin/bg2q.c | 7
-rw-r--r--  drivers/clk/clk-asm9260.c | 4
-rw-r--r--  drivers/clk/clk-conf.c | 16
-rw-r--r--  drivers/clk/clk-cs2000-cp.c | 14
-rw-r--r--  drivers/clk/clk-divider.c | 6
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 28
-rw-r--r--  drivers/clk/clk-gate.c | 3
-rw-r--r--  drivers/clk/clk-gemini.c | 7
-rw-r--r--  drivers/clk/clk-hsdk-pll.c | 431
-rw-r--r--  drivers/clk/clk-mb86s7x.c | 390
-rw-r--r--  drivers/clk/clk-moxart.c | 16
-rw-r--r--  drivers/clk/clk-qoriq.c | 26
-rw-r--r--  drivers/clk/clk-si5351.c | 12
-rw-r--r--  drivers/clk/clk-stm32f4.c | 4
-rw-r--r--  drivers/clk/clk-stm32h7.c | 1410
-rw-r--r--  drivers/clk/clk-versaclock5.c | 172
-rw-r--r--  drivers/clk/clk-xgene.c | 15
-rw-r--r--  drivers/clk/clk.c | 4
-rw-r--r--  drivers/clk/clkdev.c | 4
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx51-imx53.c | 8
-rw-r--r--  drivers/clk/imx/clk-imx6sl.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 6
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 4
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-cpumux.c | 6
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 2
-rw-r--r--  drivers/clk/mediatek/reset.c | 2
-rw-r--r--  drivers/clk/meson/Kconfig | 1
-rw-r--r--  drivers/clk/meson/Makefile | 2
-rw-r--r--  drivers/clk/meson/gxbb-aoclk-32k.c | 194
-rw-r--r--  drivers/clk/meson/gxbb-aoclk-regmap.c | 46
-rw-r--r--  drivers/clk/meson/gxbb-aoclk.c | 65
-rw-r--r--  drivers/clk/meson/gxbb-aoclk.h | 42
-rw-r--r--  drivers/clk/meson/gxbb.c | 189
-rw-r--r--  drivers/clk/meson/gxbb.h | 125
-rw-r--r--  drivers/clk/meson/meson8b.c | 160
-rw-r--r--  drivers/clk/meson/meson8b.h | 112
-rw-r--r--  drivers/clk/mmp/clk.c | 2
-rw-r--r--  drivers/clk/nxp/clk-lpc32xx.c | 12
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 28
-rw-r--r--  drivers/clk/renesas/Kconfig | 48
-rw-r--r--  drivers/clk/renesas/Makefile | 2
-rw-r--r--  drivers/clk/renesas/clk-div6.c | 3
-rw-r--r--  drivers/clk/renesas/clk-mstp.c | 2
-rw-r--r--  drivers/clk/renesas/clk-rcar-gen2.c | 3
-rw-r--r--  drivers/clk/renesas/r8a7792-cpg-mssr.c | 7
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 34
-rw-r--r--  drivers/clk/renesas/r8a7796-cpg-mssr.c | 35
-rw-r--r--  drivers/clk/renesas/r8a77995-cpg-mssr.c | 236
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 69
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.h | 15
-rw-r--r--  drivers/clk/renesas/rcar-usb2-clock-sel.c | 188
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 6
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3128.c | 69
-rw-r--r--  drivers/clk/rockchip/clk-rk3228.c | 2
-rw-r--r--  drivers/clk/rockchip/clk-rv1108.c | 462
-rw-r--r--  drivers/clk/rockchip/clk.c | 36
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 8
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 23
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 18
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun4i-a10.c | 1456
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun4i-a10.h | 61
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun5i.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a23.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a33.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a83t.c | 10
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 16
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r.h | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 1290
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.h | 69
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu_common.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.c | 22
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.h | 3
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.c | 14
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.h | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mmc_timing.c | 70
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 80
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.h | 30
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mult.c | 10
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.c | 22
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.h | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.c | 19
-rw-r--r--  drivers/clk/sunxi-ng/ccu_reset.c | 12
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-bus-gates.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 17
-rw-r--r--  drivers/clk/tegra/clk-emc.c | 12
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 159
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 3
-rw-r--r--  drivers/clk/tegra/clk-tegra-super-gen4.c | 11
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 32
-rw-r--r--  drivers/clk/tegra/clk.h | 6
-rw-r--r--  drivers/clk/ti/adpll.c | 4
-rw-r--r--  drivers/clk/ti/apll.c | 2
-rw-r--r--  drivers/clk/ti/clockdomain.c | 4
-rw-r--r--  drivers/clk/ti/fapll.c | 4
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-core.c | 26
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mio.c | 4
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-sys.c | 98
-rw-r--r--  drivers/clk/uniphier/clk-uniphier.h | 4
-rw-r--r--  drivers/clk/ux500/clk-prcc.c | 6
-rw-r--r--  drivers/clk/ux500/clk-prcmu.c | 14
-rw-r--r--  drivers/clk/ux500/clk-sysctrl.c | 8
-rw-r--r--  drivers/clk/versatile/clk-vexpress-osc.c | 2
-rw-r--r--  drivers/clk/zte/clk-zx296718.c | 6
-rw-r--r--  drivers/clocksource/Kconfig | 10
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 8
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 1
-rw-r--r--  drivers/clocksource/em_sti.c | 11
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 37
-rw-r--r--  drivers/clocksource/tango_xtal.c | 6
-rw-r--r--  drivers/clocksource/timer-imx-tpm.c | 239
-rw-r--r--  drivers/clocksource/timer-of.c | 15
-rw-r--r--  drivers/clocksource/timer-probe.c | 3
-rw-r--r--  drivers/clocksource/timer-stm32.c | 8
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 21
-rw-r--r--  drivers/cpufreq/Makefile | 4
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 10
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 65
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 41
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 3
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 12
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c | 103
-rw-r--r--  drivers/cpufreq/elanfreq.c | 4
-rw-r--r--  drivers/cpufreq/gx-suspmod.c | 2
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 389
-rw-r--r--  drivers/cpufreq/longrun.c | 1
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c (renamed from drivers/cpufreq/mt8173-cpufreq.c) | 29
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 2
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/sa1100-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 2
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 4
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 2
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/tango-cpufreq.c | 38
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c | 3
-rw-r--r--  drivers/cpuidle/Makefile | 1
-rw-r--r--  drivers/cpuidle/coupled.c | 10
-rw-r--r--  drivers/cpuidle/cpuidle-cps.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle.c | 18
-rw-r--r--  drivers/cpuidle/driver.c | 32
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 24
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 14
-rw-r--r--  drivers/cpuidle/governors/menu.c | 13
-rw-r--r--  drivers/cpuidle/poll_state.c | 37
-rw-r--r--  drivers/crypto/Kconfig | 49
-rw-r--r--  drivers/crypto/Makefile | 4
-rw-r--r--  drivers/crypto/atmel-ecc.c | 781
-rw-r--r--  drivers/crypto/atmel-ecc.h | 128
-rw-r--r--  drivers/crypto/atmel-sha.c | 2
-rw-r--r--  drivers/crypto/atmel-tdes.c | 2
-rw-r--r--  drivers/crypto/axis/Makefile | 1
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 3192
-rw-r--r--  drivers/crypto/bcm/cipher.c | 114
-rw-r--r--  drivers/crypto/bcm/cipher.h | 13
-rw-r--r--  drivers/crypto/caam/caamalg.c | 66
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 5
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 55
-rw-r--r--  drivers/crypto/caam/caamhash.c | 7
-rw-r--r--  drivers/crypto/caam/caamrng.c | 6
-rw-r--r--  drivers/crypto/caam/ctrl.c | 127
-rw-r--r--  drivers/crypto/caam/ctrl.h | 2
-rw-r--r--  drivers/crypto/caam/error.c | 40
-rw-r--r--  drivers/crypto/caam/error.h | 4
-rw-r--r--  drivers/crypto/caam/intern.h | 11
-rw-r--r--  drivers/crypto/caam/jr.c | 7
-rw-r--r--  drivers/crypto/caam/qi.c | 30
-rw-r--r--  drivers/crypto/caam/qi.h | 3
-rw-r--r--  drivers/crypto/caam/regs.h | 1
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h | 81
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 43
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c | 13
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r--  drivers/crypto/ccp/Kconfig | 22
-rw-r--r--  drivers/crypto/ccp/Makefile | 7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 96
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 299
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 36
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c | 15
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 20
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 28
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 134
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 30
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 25
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 133
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 356
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 293
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 277
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 133
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 276
-rw-r--r--  drivers/crypto/ccp/sp-platform.c | 256
-rw-r--r--  drivers/crypto/geode-aes.c | 17
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 5
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 2
-rw-r--r--  drivers/crypto/mxc-scc.c | 4
-rw-r--r--  drivers/crypto/mxs-dcp.c | 8
-rw-r--r--  drivers/crypto/n2_core.c | 60
-rw-r--r--  drivers/crypto/nx/Kconfig | 1
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 514
-rw-r--r--  drivers/crypto/nx/nx-842.c | 2
-rw-r--r--  drivers/crypto/nx/nx-842.h | 13
-rw-r--r--  drivers/crypto/omap-aes.c | 1
-rw-r--r--  drivers/crypto/omap-des.c | 3
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 74
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h | 15
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 103
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 119
-rw-r--r--  drivers/crypto/sahara.c | 14
-rw-r--r--  drivers/crypto/stm32/Kconfig | 19
-rw-r--r--  drivers/crypto/stm32/Makefile | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 1575
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 17
-rw-r--r--  drivers/crypto/sunxi-ss/Makefile | 1
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 30
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 56
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 11
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 109
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 22
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 37
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 3
-rw-r--r--  drivers/dax/super.c | 33
-rw-r--r--  drivers/devfreq/Kconfig | 1
-rw-r--r--  drivers/devfreq/devfreq-event.c | 4
-rw-r--r--  drivers/devfreq/devfreq.c | 5
-rw-r--r--  drivers/devfreq/governor.h | 4
-rw-r--r--  drivers/dma-buf/dma-fence.c | 4
-rw-r--r--  drivers/dma-buf/reservation.c | 99
-rw-r--r--  drivers/dma-buf/sw_sync.c | 201
-rw-r--r--  drivers/dma-buf/sync_debug.c | 19
-rw-r--r--  drivers/dma-buf/sync_debug.h | 26
-rw-r--r--  drivers/dma/Kconfig | 6
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/altera-msgdma.c | 927
-rw-r--r--  drivers/dma/amba-pl08x.c | 2
-rw-r--r--  drivers/dma/at_hdmac.c | 140
-rw-r--r--  drivers/dma/at_xdmac.c | 13
-rw-r--r--  drivers/dma/bcm-sba-raid.c | 544
-rw-r--r--  drivers/dma/dmaengine.c | 103
-rw-r--r--  drivers/dma/dmatest.c | 110
-rw-r--r--  drivers/dma/fsldma.c | 118
-rw-r--r--  drivers/dma/ioat/dma.c | 10
-rw-r--r--  drivers/dma/ioat/dma.h | 3
-rw-r--r--  drivers/dma/ioat/init.c | 2
-rw-r--r--  drivers/dma/ioat/sysfs.c | 42
-rw-r--r--  drivers/dma/k3dma.c | 12
-rw-r--r--  drivers/dma/mv_xor.c | 162
-rw-r--r--  drivers/dma/nbpfaxi.c | 17
-rw-r--r--  drivers/dma/of-dma.c | 8
-rw-r--r--  drivers/dma/pl330.c | 2
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 37
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 6
-rw-r--r--  drivers/dma/qcom/hidma.c | 37
-rw-r--r--  drivers/dma/qcom/hidma.h | 7
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 11
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 16
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 85
-rw-r--r--  drivers/dma/ste_dma40.c | 22
-rw-r--r--  drivers/dma/sun6i-dma.c | 33
-rw-r--r--  drivers/dma/tegra210-adma.c | 4
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 2
-rw-r--r--  drivers/dma/xgene-dma.c | 160
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 30
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 94
-rw-r--r--  drivers/edac/altera_edac.c | 6
-rw-r--r--  drivers/edac/amd64_edac.c | 1
-rw-r--r--  drivers/edac/amd76x_edac.c | 2
-rw-r--r--  drivers/edac/cpc925_edac.c | 3
-rw-r--r--  drivers/edac/e752x_edac.c | 2
-rw-r--r--  drivers/edac/e7xxx_edac.c | 2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 14
-rw-r--r--  drivers/edac/ghes_edac.c | 3
-rw-r--r--  drivers/edac/highbank_mc_edac.c | 1
-rw-r--r--  drivers/edac/i3000_edac.c | 3
-rw-r--r--  drivers/edac/i3200_edac.c | 3
-rw-r--r--  drivers/edac/i5000_edac.c | 1
-rw-r--r--  drivers/edac/i5100_edac.c | 1
-rw-r--r--  drivers/edac/i5400_edac.c | 1
-rw-r--r--  drivers/edac/i7300_edac.c | 1
-rw-r--r--  drivers/edac/i7core_edac.c | 9
-rw-r--r--  drivers/edac/i82443bxgx_edac.c | 3
-rw-r--r--  drivers/edac/i82860_edac.c | 2
-rw-r--r--  drivers/edac/i82875p_edac.c | 2
-rw-r--r--  drivers/edac/i82975x_edac.c | 2
-rw-r--r--  drivers/edac/ie31200_edac.c | 2
-rw-r--r--  drivers/edac/mce_amd.c | 36
-rw-r--r--  drivers/edac/mv64x60_edac.c | 1
-rw-r--r--  drivers/edac/pnd2_edac.c | 106
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 9
-rw-r--r--  drivers/edac/r82600_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 64
-rw-r--r--  drivers/edac/skx_edac.c | 3
-rw-r--r--  drivers/edac/synopsys_edac.c | 1
-rw-r--r--  drivers/edac/thunderx_edac.c | 6
-rw-r--r--  drivers/edac/x38_edac.c | 3
-rw-r--r--  drivers/edac/xgene_edac.c | 1
-rw-r--r--  drivers/extcon/Kconfig | 7
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/devres.c | 50
-rw-r--r--  drivers/extcon/extcon-intel-int3496.c | 2
-rw-r--r--  drivers/extcon/extcon-max77693.c | 5
-rw-r--r--  drivers/extcon/extcon-usbc-cros-ec.c | 417
-rw-r--r--  drivers/extcon/extcon.c | 279
-rw-r--r--  drivers/firmware/arm_scpi.c | 4
-rw-r--r--  drivers/firmware/dcdbas.c | 2
-rw-r--r--  drivers/firmware/dmi-sysfs.c | 5
-rw-r--r--  drivers/firmware/efi/Kconfig | 10
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 5
-rw-r--r--  drivers/firmware/efi/arm-init.c | 8
-rw-r--r--  drivers/firmware/efi/cper.c | 22
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 22
-rw-r--r--  drivers/firmware/efi/efi.c | 75
-rw-r--r--  drivers/firmware/efi/esrt.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 3
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 3
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 16
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 10
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 58
-rw-r--r--  drivers/firmware/efi/reboot.c | 12
-rw-r--r--  drivers/firmware/google/gsmi.c | 2
-rw-r--r--  drivers/firmware/google/memconsole-x86-legacy.c | 2
-rw-r--r--  drivers/firmware/google/vpd.c | 10
-rw-r--r--  drivers/firmware/pcdp.c | 4
-rw-r--r--  drivers/firmware/psci.c | 4
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 4
-rw-r--r--  drivers/fmc/Makefile | 1
-rw-r--r--  drivers/fmc/fmc-chardev.c | 3
-rw-r--r--  drivers/fmc/fmc-core.c | 95
-rw-r--r--  drivers/fmc/fmc-debug.c | 173
-rw-r--r--  drivers/fmc/fmc-dump.c | 41
-rw-r--r--  drivers/fmc/fmc-match.c | 2
-rw-r--r--  drivers/fmc/fmc-private.h | 9
-rw-r--r--  drivers/fmc/fmc-sdb.c | 119
-rw-r--r--  drivers/fmc/fmc-trivial.c | 20
-rw-r--r--  drivers/fmc/fmc-write-eeprom.c | 8
-rw-r--r--  drivers/fmc/fru-parse.c | 3
-rw-r--r--  drivers/fpga/Kconfig | 20
-rw-r--r--  drivers/fpga/Makefile | 2
-rw-r--r--  drivers/fpga/altera-cvp.c | 500
-rw-r--r--  drivers/fpga/altera-hps2fpga.c | 12
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 308
-rw-r--r--  drivers/fpga/fpga-region.c | 4
-rw-r--r--  drivers/fsi/fsi-core.c | 4
-rw-r--r--  drivers/fsi/fsi-scom.c | 10
-rw-r--r--  drivers/gpio/Kconfig | 26
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/devres.c | 3
-rw-r--r--  drivers/gpio/gpio-74x164.c | 10
-rw-r--r--  drivers/gpio/gpio-altera-a10sr.c | 2
-rw-r--r--  drivers/gpio/gpio-altera.c | 4
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 2
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 11
-rw-r--r--  drivers/gpio/gpio-davinci.c | 22
-rw-r--r--  drivers/gpio/gpio-ge.c | 6
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 2
-rw-r--r--  drivers/gpio/gpio-it87.c | 3
-rw-r--r--  drivers/gpio/gpio-max77620.c | 2
-rw-r--r--  drivers/gpio/gpio-mb86s7x.c | 4
-rw-r--r--  drivers/gpio/gpio-ml-ioh.c | 12
-rw-r--r--  drivers/gpio/gpio-mockup.c | 79
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 4
-rw-r--r--  drivers/gpio/gpio-msic.c | 4
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 2
-rw-r--r--  drivers/gpio/gpio-mxc.c | 27
-rw-r--r--  drivers/gpio/gpio-mxs.c | 15
-rw-r--r--  drivers/gpio/gpio-omap.c | 2
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 8
-rw-r--r--  drivers/gpio/gpio-pch.c | 12
-rw-r--r--  drivers/gpio/gpio-pl061.c | 2
-rw-r--r--  drivers/gpio/gpio-pxa.c | 8
-rw-r--r--  drivers/gpio/gpio-rcar.c | 10
-rw-r--r--  drivers/gpio/gpio-sta2x11.c | 14
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 3
-rw-r--r--  drivers/gpio/gpio-tegra.c | 129
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 639
-rw-r--r--  drivers/gpio/gpio-tps68470.c | 176
-rw-r--r--  drivers/gpio/gpio-twl4030.c | 2
-rw-r--r--  drivers/gpio/gpio-twl6040.c | 2
-rw-r--r--  drivers/gpio/gpio-tz1090.c | 10
-rw-r--r--  drivers/gpio/gpio-vf610.c | 47
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 4
-rw-r--r--  drivers/gpio/gpio-zevio.c | 2
-rw-r--r--  drivers/gpio/gpio-zynq.c | 160
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 4
-rw-r--r--  drivers/gpio/gpiolib-of.c | 36
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 18
-rw-r--r--  drivers/gpio/gpiolib.c | 130
-rw-r--r--  drivers/gpio/gpiolib.h | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 226
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 161
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 257
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 278
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 504
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 533
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 168
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 243
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 184
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 183
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 123
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 318
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 294
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h | 330
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h | 140
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 46
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 63
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 30
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 113
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 241
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 313
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 1291
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 88
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_debug.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 184
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 34
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 30
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 9
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 61
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 4
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 4
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 25
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 23
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 13
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 48
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 19
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 32
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 16
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 45
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 11
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 36
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c | 327
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h | 19
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 107
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 46
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 981
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 7
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 8
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 10
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 71
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 230
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 611
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 2
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 3
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 7
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 59
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 89
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 56
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 26
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 440
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 184
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 681
-rw-r--r--  drivers/gpu/drm/drm_file.c | 9
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 55
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 39
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 283
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 20
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 23
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 19
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 7
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c | 159
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 91
-rw-r--r--  drivers/gpu/drm/drm_modeset_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 12
-rw-r--r--  drivers/gpu/drm/drm_of.c | 4
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 40
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 121
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_property.c | 23
-rw-r--r--  drivers/gpu/drm/drm_scdc_helper.c | 35
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 531
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 187
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 6
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 45
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 124
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 222
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 30
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 30
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 28
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 48
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 22
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 32
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 12
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 67
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 31
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 30
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 12
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 128
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 54
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 113
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 211
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 209
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 370
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 464
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 290
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 79
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 659
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 377
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.c | 5310
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.c | 2624
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.c | 2808
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.c | 2536
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.c | 765
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.c | 2924
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.c | 2973
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.c | 3413
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.c | 2972
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.c | 3026
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 809
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 521
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 121
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1814
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 218
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 98
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 204
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 245
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen9.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 1028
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 161
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 22
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 343
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c | 36
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.h | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 30
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 10
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 1
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 1
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 5
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 63
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 1
-rw-r--r--  drivers/gpu/drm/lib/drm_random.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 32
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 27
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 1
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 62
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 51
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 53
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 94
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 64
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 38
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 27
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 54
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 63
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 59
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 34
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 37
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 45
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 58
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 46
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 85
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 7
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_out.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 157
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 104
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 81
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/Makefile | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c | 190
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 824
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 88
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 329
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 406
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 49
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.c | 905
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.h | 109
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi.h | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 38
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 60
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 25
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/pll.c | 29
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 86
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 38
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 137
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 124
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c | 16
-rw-r--r--  drivers/gpu/drm/pl111/pl111_connector.c | 1
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c | 5
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 35
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 95
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/vce_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 199
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 17
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 30
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.c | 38
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 129
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 12
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 119
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.h | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 56
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 110
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 31
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 28
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 174
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 81
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 375
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 905
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 5
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_mm.c | 4
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 5
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c8
-rw-r--r--drivers/gpu/drm/stm/Kconfig9
-rw-r--r--drivers/gpu/drm/stm/Makefile2
-rw-r--r--drivers/gpu/drm/stm/drv.c23
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c352
-rw-r--r--drivers/gpu/drm/stm/ltdc.c470
-rw-r--r--drivers/gpu/drm/stm/ltdc.h4
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig16
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c19
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c159
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c220
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c5
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dc.c22
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c12
-rw-r--r--drivers/gpu/drm/tegra/drm.c116
-rw-r--r--drivers/gpu/drm/tegra/drm.h12
-rw-r--r--drivers/gpu/drm/tegra/dsi.c15
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c78
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c15
-rw-r--r--drivers/gpu/drm/tegra/rgb.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c15
-rw-r--r--drivers/gpu/drm/tegra/trace.c2
-rw-r--r--drivers/gpu/drm/tegra/trace.h68
-rw-r--r--drivers/gpu/drm/tegra/vic.c15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c1
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig23
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c60
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c5
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c8
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c17
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c1117
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c428
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c68
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c86
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c11
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c13
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig8
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c291
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c50
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h40
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c291
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c85
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h113
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c63
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c78
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c72
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c86
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h4
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c242
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c148
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c104
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c111
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c6
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c3
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c1
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c1
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c10
-rw-r--r--drivers/gpu/host1x/bus.c19
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c24
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c2
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c4
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-asus.c218
-rw-r--r--drivers/hid/hid-core.c16
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-input.c196
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-multitouch.c50
-rw-r--r--drivers/hid/hid-ntrig.c2
-rw-r--r--drivers/hid/hid-picolcd_cir.c4
-rw-r--r--drivers/hid/hid-prodikeys.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c94
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c3
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hid/usbhid/hid-core.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hid/wacom_sys.c63
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c143
-rw-r--r--drivers/hv/channel_mgmt.c49
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv.c9
-rw-r--r--drivers/hv/hv_balloon.c12
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/ring_buffer.c169
-rw-r--r--drivers/hv/vmbus_drv.c20
-rw-r--r--drivers/hwmon/Kconfig8
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/ads1015.c14
-rw-r--r--drivers/hwmon/adt7475.c16
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c116
-rw-r--r--drivers/hwmon/da9052-hwmon.c285
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/ftsteutates.c4
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/it87.c214
-rw-r--r--drivers/hwmon/jc42.c19
-rw-r--r--drivers/hwmon/ltq-cputemp.c163
-rw-r--r--drivers/hwmon/nct7802.c10
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c151
-rw-r--r--drivers/hwmon/pmbus/lm25066.c47
-rw-r--r--drivers/hwmon/pmbus/pmbus.h2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c292
-rw-r--r--drivers/hwmon/pmbus/tps53679.c113
-rw-r--r--drivers/hwmon/scpi-hwmon.c2
-rw-r--r--drivers/hwmon/stts751.c4
-rw-r--r--drivers/hwtracing/coresight/Kconfig10
-rw-r--r--drivers/hwtracing/coresight/Makefile2
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c (renamed from drivers/hwtracing/coresight/coresight-replicator-qcom.c)34
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c68
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h39
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c108
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h85
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight.c8
-rw-r--r--drivers/hwtracing/intel_th/core.c359
-rw-r--r--drivers/hwtracing/intel_th/gth.c40
-rw-r--r--drivers/hwtracing/intel_th/gth.h5
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h104
-rw-r--r--drivers/hwtracing/intel_th/msu.c12
-rw-r--r--drivers/hwtracing/intel_th/pci.c67
-rw-r--r--drivers/hwtracing/intel_th/pti.c115
-rw-r--r--drivers/hwtracing/intel_th/pti.h8
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/busses/Kconfig35
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-altera.c511
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c86
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c6
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c363
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c39
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c8
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c6
-rw-r--r--drivers/i2c/busses/i2c-gpio.c4
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c6
-rw-r--r--drivers/i2c/busses/i2c-kempld.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c6
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c79
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c5
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c12
-rw-r--r--drivers/i2c/busses/i2c-puv3.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c5
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c9
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-simtec.c6
-rw-r--r--drivers/i2c/busses/i2c-sirf.c6
-rw-r--r--drivers/i2c/busses/i2c-sprd.c646
-rw-r--r--drivers/i2c/busses/i2c-st.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32.h20
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c22
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c972
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c6
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c4
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c46
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c40
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c8
-rw-r--r--drivers/i2c/i2c-core-base.c4
-rw-r--r--drivers/i2c/i2c-core-of.c24
-rw-r--r--drivers/i2c/muxes/Kconfig3
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c9
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c225
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/pmac.c18
-rw-r--r--drivers/idle/intel_idle.c190
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c1
-rw-r--r--drivers/iio/accel/da311.c2
-rw-r--r--drivers/iio/accel/sca3000.c6
-rw-r--r--drivers/iio/accel/st_accel.h5
-rw-r--r--drivers/iio/accel/st_accel_core.c6
-rw-r--r--drivers/iio/accel/st_accel_i2c.c8
-rw-r--r--drivers/iio/accel/st_accel_spi.c86
-rw-r--r--drivers/iio/adc/Kconfig33
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ad7766.c6
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c329
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c722
-rw-r--r--drivers/iio/adc/ep93xx_adc.c255
-rw-r--r--drivers/iio/adc/ina2xx-adc.c38
-rw-r--r--drivers/iio/adc/ltc2471.c160
-rw-r--r--drivers/iio/adc/ltc2497.c54
-rw-r--r--drivers/iio/adc/max9611.c4
-rw-r--r--drivers/iio/adc/mcp3422.c6
-rw-r--r--drivers/iio/adc/meson_saradc.c13
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c37
-rw-r--r--drivers/iio/adc/rockchip_saradc.c8
-rw-r--r--drivers/iio/adc/stm32-adc-core.c12
-rw-r--r--drivers/iio/adc/stm32-adc.c154
-rw-r--r--drivers/iio/adc/ti-ads1015.c611
-rw-r--r--drivers/iio/adc/ti-ads7950.c42
-rw-r--r--drivers/iio/adc/twl4030-madc.c2
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c38
-rw-r--r--drivers/iio/adc/xilinx-xadc.h10
-rw-r--r--drivers/iio/chemical/Kconfig9
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/ccs811.c405
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c3
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c31
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c29
-rw-r--r--drivers/iio/counter/Kconfig9
-rw-r--r--drivers/iio/counter/Makefile1
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c383
-rw-r--r--drivers/iio/dac/stm32-dac-core.c40
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c10
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c13
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c8
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c54
-rw-r--r--drivers/iio/humidity/Kconfig3
-rw-r--r--drivers/iio/humidity/hdc100x.c22
-rw-r--r--drivers/iio/humidity/hts221.h11
-rw-r--r--drivers/iio/humidity/hts221_buffer.c43
-rw-r--r--drivers/iio/humidity/hts221_core.c144
-rw-r--r--drivers/iio/humidity/htu21.c8
-rw-r--r--drivers/iio/imu/adis16400_core.c4
-rw-r--r--drivers/iio/imu/adis16480.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c17
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/rpr0521.c336
-rw-r--r--drivers/iio/light/tcs3472.c4
-rw-r--r--drivers/iio/light/tsl2583.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig4
-rw-r--r--drivers/iio/magnetometer/ak8974.c133
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c7
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c8
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c30
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c27
-rw-r--r--drivers/iio/pressure/bmp280.h5
-rw-r--r--drivers/iio/pressure/ms5637.c12
-rw-r--r--drivers/iio/pressure/st_pressure_core.c4
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c3
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c33
-rw-r--r--drivers/iio/pressure/zpa2326.c12
-rw-r--r--drivers/iio/proximity/Kconfig8
-rw-r--r--drivers/iio/proximity/srf08.c227
-rw-r--r--drivers/iio/temperature/tsys01.c7
-rw-r--r--drivers/iio/trigger/Kconfig11
-rw-r--r--drivers/iio/trigger/Makefile1
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c118
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c160
-rw-r--r--drivers/infiniband/Kconfig9
-rw-r--r--drivers/infiniband/core/Makefile6
-rw-r--r--drivers/infiniband/core/addr.c12
-rw-r--r--drivers/infiniband/core/cache.c23
-rw-r--r--drivers/infiniband/core/cm.c224
-rw-r--r--drivers/infiniband/core/cma.c35
-rw-r--r--drivers/infiniband/core/core_priv.h27
-rw-r--r--drivers/infiniband/core/device.c149
-rw-r--r--drivers/infiniband/core/iwcm.c16
-rw-r--r--drivers/infiniband/core/iwpm_msg.c20
-rw-r--r--drivers/infiniband/core/iwpm_util.c15
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/netlink.c321
-rw-r--r--drivers/infiniband/core/nldev.c325
-rw-r--r--drivers/infiniband/core/rdma_core.c179
-rw-r--r--drivers/infiniband/core/rdma_core.h42
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c24
-rw-r--r--drivers/infiniband/core/sa_query.c42
-rw-r--r--drivers/infiniband/core/sysfs.c4
-rw-r--r--drivers/infiniband/core/ucm.c2
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/infiniband/core/umem_odp.c19
-rw-r--r--drivers/infiniband/core/umem_rbtree.c4
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs.h3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c283
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c364
-rw-r--r--drivers/infiniband/core/uverbs_ioctl_merge.c665
-rw-r--r--drivers/infiniband/core/uverbs_main.c39
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c48
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c281
-rw-r--r--drivers/infiniband/core/verbs.c176
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c114
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h62
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c107
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c168
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c486
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c26
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c5
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig7
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c18
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h14
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h41
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c808
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h29
-rw-r--r--drivers/infiniband/hw/hfi1/common.h11
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c94
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c513
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c11
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c114
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h190
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c437
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c73
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h554
-rw-r--r--drivers/infiniband/hw/hfi1/init.c393
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c3
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h70
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c805
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h5
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c46
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h5
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h21
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c384
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c15
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c102
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c222
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h23
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c426
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c321
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c42
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h3
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c191
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h456
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h20
-rw-r--r--drivers/infiniband/hw/hfi1/trace_mmu.h95
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h102
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h136
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c60
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c487
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c371
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h58
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c626
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h169
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c384
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h59
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c11
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h16
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c39
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c27
-rw-r--r--drivers/infiniband/hw/hns/Kconfig2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c27
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c134
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c7
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c54
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h42
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c1080
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c17
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h4
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c421
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c464
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h86
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c121
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c172
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c33
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c14
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c7
-rw-r--r--drivers/infiniband/hw/nes/nes.c70
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c10
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c6
-rw-r--r--drivers/infiniband/hw/qedr/main.c7
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib.h8
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c29
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c92
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c23
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c149
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c51
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c43
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h14
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c18
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c43
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h12
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c20
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h37
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c44
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c9
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c170
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c348
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h62
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h11
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c26
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c67
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c22
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c35
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h4
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/joystick/adi.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c2
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/spaceball.c2
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/stinger.c2
-rw-r--r--drivers/input/joystick/twidjoy.c2
-rw-r--r--drivers/input/joystick/warrior.c2
-rw-r--r--drivers/input/joystick/xpad.c36
-rw-r--r--drivers/input/joystick/zhenhua.c2
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c18
-rw-r--r--drivers/input/keyboard/hil_kbd.c2
-rw-r--r--drivers/input/keyboard/hilkbd.c10
-rw-r--r--drivers/input/keyboard/lkkbd.c2
-rw-r--r--drivers/input/keyboard/newtonkbd.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c15
-rw-r--r--drivers/input/keyboard/stowaway.c2
-rw-r--r--drivers/input/keyboard/sunkbd.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c5
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c2
-rw-r--r--drivers/input/keyboard/xtkbd.c2
-rw-r--r--drivers/input/misc/Kconfig23
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ati_remote2.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c167
-rw-r--r--drivers/input/misc/dm355evm_keys.c2
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/keyspan_remote.c2
-rw-r--r--drivers/input/misc/pcspkr.c17
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/pwm-vibra.c267
-rw-r--r--drivers/input/misc/rk805-pwrkey.c111
-rw-r--r--drivers/input/misc/soc_button_array.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c5
-rw-r--r--drivers/input/misc/yealink.c2
-rw-r--r--drivers/input/mouse/alps.c41
-rw-r--r--drivers/input/mouse/alps.h8
-rw-r--r--drivers/input/mouse/appletouch.c2
-rw-r--r--drivers/input/mouse/byd.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c14
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c13
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c4
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/synaptics.c35
-rw-r--r--drivers/input/mouse/synaptics_usb.c2
-rw-r--r--drivers/input/mouse/trackpoint.c7
-rw-r--r--drivers/input/mouse/trackpoint.h3
-rw-r--r--drivers/input/mousedev.c62
-rw-r--r--drivers/input/rmi4/rmi_f01.c13
-rw-r--r--drivers/input/rmi4/rmi_f34.c2
-rw-r--r--drivers/input/serio/Kconfig11
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ambakmi.c2
-rw-r--r--drivers/input/serio/gscps2.c10
-rw-r--r--drivers/input/serio/hp_sdc.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h11
-rw-r--r--drivers/input/serio/ps2-gpio.c453
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/serio/serio_raw.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c12
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/kbtab.c2
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c46
-rw-r--r--drivers/input/touchscreen/dynapro.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c2
-rw-r--r--drivers/input/touchscreen/goodix.c9
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/hampshire.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c2
-rw-r--r--drivers/input/touchscreen/inexio.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/touchscreen/mxs-lradc-ts.c8
-rw-r--r--drivers/input/touchscreen/penmount.c2
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c2
-rw-r--r--drivers/input/touchscreen/sur40.c46
-rw-r--r--drivers/input/touchscreen/surface3_spi.c2
-rw-r--r--drivers/input/touchscreen/touchit213.c2
-rw-r--r--drivers/input/touchscreen/touchright.c2
-rw-r--r--drivers/input/touchscreen/touchwin.c2
-rw-r--r--drivers/input/touchscreen/tsc40.c2
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c4
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c341
-rw-r--r--drivers/iommu/amd_iommu_init.c257
-rw-r--r--drivers/iommu/amd_iommu_proto.h12
-rw-r--r--drivers/iommu/amd_iommu_types.h61
-rw-r--r--drivers/iommu/amd_iommu_v2.c26
-rw-r--r--drivers/iommu/arm-smmu-regs.h220
-rw-r--r--drivers/iommu/arm-smmu-v3.c7
-rw-r--r--drivers/iommu/arm-smmu.c384
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c44
-rw-r--r--drivers/iommu/fsl_pamu.c27
-rw-r--r--drivers/iommu/fsl_pamu_domain.c28
-rw-r--r--drivers/iommu/intel-iommu.c291
-rw-r--r--drivers/iommu/intel-svm.c23
-rw-r--r--drivers/iommu/iommu-sysfs.c32
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iova.c183
-rw-r--r--drivers/iommu/ipmmu-vmsa.c242
-rw-r--r--drivers/iommu/msm_iommu.c15
-rw-r--r--drivers/iommu/mtk_iommu.c214
-rw-r--r--drivers/iommu/mtk_iommu.h9
-rw-r--r--drivers/iommu/of_iommu.c144
-rw-r--r--drivers/iommu/omap-iommu.c125
-rw-r--r--drivers/iommu/omap-iommu.h1
-rw-r--r--drivers/iommu/qcom_iommu.c930
-rw-r--r--drivers/iommu/rockchip-iommu.c52
-rw-r--r--drivers/iommu/s390-iommu.c37
-rw-r--r--drivers/iommu/tegra-gart.c45
-rw-r--r--drivers/iommu/tegra-smmu.c39
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c5
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c13
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h4
-rw-r--r--drivers/irqchip/irq-atmel-aic.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c4
-rw-r--r--drivers/irqchip/irq-bcm2835.c9
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c10
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c1
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c8
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c12
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1536
-rw-r--r--drivers/irqchip/irq-gic-v3.c125
-rw-r--r--drivers/irqchip/irq-gic-v4.c225
-rw-r--r--drivers/irqchip/irq-gic.c19
-rw-r--r--drivers/irqchip/irq-hip04.c3
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c256
-rw-r--r--drivers/irqchip/irq-metag-ext.c4
-rw-r--r--drivers/irqchip/irq-mips-cpu.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c613
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c3
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c8
-rw-r--r--drivers/irqchip/irq-sun4i.c6
-rw-r--r--drivers/irqchip/irq-tegra.c16
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c261
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c6
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h16
-rw-r--r--drivers/isdn/hardware/eicon/message.c247
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c2
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c2
-rw-r--r--drivers/isdn/mISDN/fsm.c5
-rw-r--r--drivers/isdn/mISDN/fsm.h2
-rw-r--r--drivers/isdn/mISDN/layer1.c3
-rw-r--r--drivers/isdn/mISDN/layer2.c15
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-aat1290.c10
-rw-r--r--drivers/leds/leds-as3645a.c763
-rw-r--r--drivers/leds/leds-blinkm.c2
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-gpio.c7
-rw-r--r--drivers/leds/leds-is31fl32xx.c4
-rw-r--r--drivers/leds/leds-lm3533.c2
-rw-r--r--drivers/leds/leds-lp5521.c8
-rw-r--r--drivers/leds/leds-lp5562.c10
-rw-r--r--drivers/leds/leds-lp8501.c6
-rw-r--r--drivers/leds/leds-max77693.c4
-rw-r--r--drivers/leds/leds-pca955x.c350
-rw-r--r--drivers/leds/leds-powernv.c6
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c11
-rw-r--r--drivers/lguest/Kconfig13
-rw-r--r--drivers/lguest/Makefile26
-rw-r--r--drivers/lguest/README47
-rw-r--r--drivers/lguest/core.c398
-rw-r--r--drivers/lguest/hypercalls.c304
-rw-r--r--drivers/lguest/interrupts_and_traps.c706
-rw-r--r--drivers/lguest/lg.h258
-rw-r--r--drivers/lguest/lguest_user.c446
-rw-r--r--drivers/lguest/page_tables.c1239
-rw-r--r--drivers/lguest/segments.c228
-rw-r--r--drivers/lguest/x86/core.c724
-rw-r--r--drivers/lguest/x86/switcher_32.S388
-rw-r--r--drivers/macintosh/macio_sysfs.c2
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c8
-rw-r--r--drivers/macintosh/via-cuda.c4
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c6
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c4
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c10
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c252
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/mcb/mcb-core.c20
-rw-r--r--drivers/mcb/mcb-lpc.c15
-rw-r--r--drivers/mcb/mcb-parse.c6
-rw-r--r--drivers/md/bcache/alloc.c4
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/closure.c15
-rw-r--r--drivers/md/bcache/closure.h4
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/request.c33
-rw-r--r--drivers/md/bcache/super.c16
-rw-r--r--drivers/md/bcache/sysfs.c19
-rw-r--r--drivers/md/bcache/util.c50
-rw-r--r--drivers/md/bcache/writeback.c25
-rw-r--r--drivers/md/bcache/writeback.h21
-rw-r--r--drivers/md/bitmap.c9
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c97
-rw-r--r--drivers/md/dm-bufio.h9
-rw-r--r--drivers/md/dm-cache-target.c8
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-delay.c4
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-flakey.c6
-rw-r--r--drivers/md/dm-integrity.c53
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-linear.c17
-rw-r--r--drivers/md/dm-log-writes.c52
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-raid1.c12
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h1
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c30
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/dm-thin.c8
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm-zoned-target.c4
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c6
-rw-r--r--drivers/md/md.c35
-rw-r--r--drivers/md/md.h10
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c11
-rw-r--r--drivers/md/raid1.c56
-rw-r--r--drivers/md/raid10.c85
-rw-r--r--drivers/md/raid5-cache.c79
-rw-r--r--drivers/md/raid5-ppl.c177
-rw-r--r--drivers/md/raid5.c32
-rw-r--r--drivers/media/Kconfig20
-rw-r--r--drivers/media/cec/Makefile4
-rw-r--r--drivers/media/cec/cec-adap.c281
-rw-r--r--drivers/media/cec/cec-api.c92
-rw-r--r--drivers/media/cec/cec-core.c27
-rw-r--r--drivers/media/cec/cec-pin.c802
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c2
-rw-r--r--drivers/media/common/siano/smsir.c6
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c150
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/dvb-core/demux.h2
-rw-r--r--drivers/media/dvb-core/dmxdev.c24
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c945
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h10
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c15
-rw-r--r--drivers/media/dvb-frontends/Kconfig27
-rw-r--r--drivers/media/dvb-frontends/Makefile3
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c75
-rw-r--r--drivers/media/dvb-frontends/dib0090.c11
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c20
-rw-r--r--drivers/media/dvb-frontends/dib8000.h1
-rw-r--r--drivers/media/dvb-frontends/dib9000.c22
-rw-r--r--drivers/media/dvb-frontends/dib9000.h7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c35
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c6
-rw-r--r--drivers/media/dvb-frontends/isl6421.c76
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c25
-rw-r--r--drivers/media/dvb-frontends/mn88472.c4
-rw-r--r--drivers/media/dvb-frontends/mn88473.c4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c1873
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h41
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h731
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h367
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c156
-rw-r--r--drivers/media/dvb-frontends/stv0910.c1813
-rw-r--r--drivers/media/dvb-frontends/stv0910.h32
-rw-r--r--drivers/media/dvb-frontends/stv0910_regs.h4760
-rw-r--r--drivers/media/dvb-frontends/stv6111.c681
-rw-r--r--drivers/media/dvb-frontends/stv6111.h21
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c2
-rw-r--r--drivers/media/i2c/Kconfig36
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad9389b.c2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/Makefile7
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c552
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c833
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c326
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c768
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h425
-rw-r--r--drivers/media/i2c/adv7511.c5
-rw-r--r--drivers/media/i2c/adv7604.c7
-rw-r--r--drivers/media/i2c/adv7842.c5
-rw-r--r--drivers/media/i2c/dw9714.c26
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c26
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c59
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/mt9m111.c6
-rw-r--r--drivers/media/i2c/mt9t001.c8
-rw-r--r--drivers/media/i2c/ov13858.c101
-rw-r--r--drivers/media/i2c/ov5640.c3
-rw-r--r--drivers/media/i2c/ov5645.c49
-rw-r--r--drivers/media/i2c/ov5670.c2601
-rw-r--r--drivers/media/i2c/ov6650.c (renamed from drivers/media/i2c/soc_camera/ov6650.c)77
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov9650.c67
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c3
-rw-r--r--drivers/media/i2c/s5k5baf.c9
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c16
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c8
-rw-r--r--drivers/media/i2c/soc_camera/Kconfig6
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/ths8200.c2
-rw-r--r--drivers/media/i2c/vs6624.c2
-rw-r--r--drivers/media/media-device.c16
-rw-r--r--drivers/media/media-entity.c2
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c18
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c70
-rw-r--r--drivers/media/pci/cobalt/cobalt-alsa-pcm.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c8
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c16
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c13
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821.h2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c30
-rw-r--r--drivers/media/pci/ddbridge/Kconfig21
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4070
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c376
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.h43
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c230
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.h112
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h71
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c346
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.c444
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.h29
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h159
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h341
-rw-r--r--drivers/media/pci/dm1105/dm1105.c8
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c18
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_common.h2
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/mantis/mantis_input.c6
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/ngene/ngene-i2c.c2
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c81
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c6
-rw-r--r--drivers/media/pci/smipcie/smipcie.h2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c97
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-tw28.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h5
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c12
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c9
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c2
-rw-r--r--drivers/media/pci/ttpci/budget.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/platform/Kconfig21
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c6
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/platform/coda/coda-bit.c29
-rw-r--r--drivers/media/platform/coda/coda-common.c78
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c28
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c4
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec.c744
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c10
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c8
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c37
-rw-r--r--drivers/media/platform/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c161
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c22
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c18
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c91
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h7
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h4
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h6
-rw-r--r--drivers/media/platform/pxa_camera.c9
-rw-r--r--drivers/media/platform/qcom/camss-8x16/Makefile11
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.c1092
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.h82
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.c890
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.h77
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.c1175
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.h85
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3088
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c860
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.h70
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c746
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.h106
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c51
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h1
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c31
-rw-r--r--drivers/media/platform/qcom/venus/venc.c47
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c4
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c7
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c200
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h8
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c9
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h2
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c8
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c3
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c6
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c495
-rw-r--r--drivers/media/platform/ti-vpe/cal.c4
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/video-mux.c53
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c2
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c2
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c66
-rw-r--r--drivers/media/platform/vivid/vivid-core.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c45
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c205
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c286
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c115
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c40
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h12
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c5
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c49
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h48
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h60
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c26
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c57
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c251
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c28
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c52
-rw-r--r--drivers/media/radio/dsbr100.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-keene.c2
-rw-r--r--drivers/media/radio/radio-ma901.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-mr800.c2
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig53
-rw-r--r--drivers/media/rc/Makefile3
-rw-r--r--drivers/media/rc/ati_remote.c7
-rw-r--r--drivers/media/rc/ene_ir.c6
-rw-r--r--drivers/media/rc/fintek-cir.c4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c31
-rw-r--r--drivers/media/rc/gpio-ir-tx.c176
-rw-r--r--drivers/media/rc/igorplugusb.c11
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h4
-rw-r--r--drivers/media/rc/img-ir/img-ir-jvc.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c20
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc5.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc6.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sanyo.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sharp.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c27
-rw-r--r--drivers/media/rc/imon.c55
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c6
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c12
-rw-r--r--drivers/media/rc/ir-nec-decoder.c57
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c25
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c30
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c16
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c6
-rw-r--r--drivers/media/rc/ir-sony-decoder.c23
-rw-r--r--drivers/media/rc/ir-spi.c1
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c4
-rw-r--r--drivers/media/rc/ite-cir.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c8
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c8
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c8
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c8
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cec.c2
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-delock-61959.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c8
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c8
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c8
-rw-r--r--drivers/media/rc/keymaps/rc-geekbox.c8
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c8
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-pad.c8
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v1.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c8
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c8
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c8
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c8
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c8
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c8
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c8
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c8
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c8
-rw-r--r--drivers/media/rc/keymaps/rc-reddo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c8
-rw-r--r--drivers/media/rc/keymaps/rc-streamzap.c8
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-ts35.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c8
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c8
-rw-r--r--drivers/media/rc/keymaps/rc-zx-irdec.c79
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/mceusb.c40
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c246
-rw-r--r--drivers/media/rc/nuvoton-cir.c120
-rw-r--r--drivers/media/rc/nuvoton-cir.h24
-rw-r--r--drivers/media/rc/pwm-ir-tx.c138
-rw-r--r--drivers/media/rc/rc-core-priv.h5
-rw-r--r--drivers/media/rc/rc-ir-raw.c68
-rw-r--r--drivers/media/rc/rc-loopback.c6
-rw-r--r--drivers/media/rc/rc-main.c265
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/serial_ir.c46
-rw-r--r--drivers/media/rc/sir_ir.c6
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c4
-rw-r--r--drivers/media/rc/sunxi-cir.c6
-rw-r--r--drivers/media/rc/ttusbir.c4
-rw-r--r--drivers/media/rc/winbond-cir.c37
-rw-r--r--drivers/media/rc/zx-irdec.c184
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/tda18271-maps.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c2
-rw-r--r--drivers/media/usb/airspy/airspy.c4
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/Kconfig1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c4
-rw-r--r--drivers/media/usb/au0828/au0828-input.c6
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c11
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c13
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c13
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c30
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c28
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c152
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c12
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c74
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c126
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c4
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c5
-rw-r--r--drivers/media/usb/msi2500/msi2500.c4
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c14
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c7
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c6
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c40
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c11
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c19
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c12
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c27
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c8
-rw-r--r--drivers/memory/atmel-ebi.c40
-rw-r--r--drivers/memory/jz4780-nemc.c12
-rw-r--r--drivers/memory/mtk-smi.c96
-rw-r--r--drivers/memory/mvebu-devbus.c12
-rw-r--r--drivers/memory/omap-gpmc.c16
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/message/fusion/mptfc.c10
-rw-r--r--drivers/message/fusion/mptsas.c86
-rw-r--r--drivers/mfd/Kconfig57
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/atmel-smc.c69
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c32
-rw-r--r--drivers/mfd/bd9571mwv.c230
-rw-r--r--drivers/mfd/da9052-core.c28
-rw-r--r--drivers/mfd/da9052-spi.c2
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9062-core.c6
-rw-r--r--drivers/mfd/db8500-prcmu.c62
-rw-r--r--drivers/mfd/dm355evm_msp.c2
-rw-r--r--drivers/mfd/hi6421-pmic-core.c89
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel-lpss.c8
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c34
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h3
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c27
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lp87565.c7
-rw-r--r--drivers/mfd/lpc_ich.c10
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/retu-mfd.c12
-rw-r--r--drivers/mfd/rk808.c147
-rw-r--r--drivers/mfd/rtsx_pcr.c4
-rw-r--r--drivers/mfd/stm32-lptimer.c107
-rw-r--r--drivers/mfd/t7l66xb.c17
-rw-r--r--drivers/mfd/tps6105x.c8
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps68470.c106
-rw-r--r--drivers/mfd/twl-core.c10
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mfd/twl4030-power.c2
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c4
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c34
-rw-r--r--drivers/misc/bh1770glc.c2
-rw-r--r--drivers/misc/cxl/api.c4
-rw-r--r--drivers/misc/cxl/fault.c16
-rw-r--r--drivers/misc/cxl/file.c8
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c24
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c126
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/lkdtm.h30
-rw-r--r--drivers/misc/lkdtm_bugs.c134
-rw-r--r--drivers/misc/lkdtm_core.c28
-rw-r--r--drivers/misc/lkdtm_refcount.c400
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/hw-me.c45
-rw-r--r--drivers/misc/mei/hw-me.h39
-rw-r--r--drivers/misc/mei/pci-me.c109
-rw-r--r--drivers/misc/mic/scif/scif_dma.c11
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c132
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c12
-rw-r--r--drivers/misc/sram.c12
-rw-r--r--drivers/misc/ti-st/st_kim.c2
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/Kconfig7
-rw-r--r--drivers/mmc/Makefile2
-rw-r--r--drivers/mmc/core/block.c326
-rw-r--r--drivers/mmc/core/core.c68
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/debugfs.c89
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/host.h1
-rw-r--r--drivers/mmc/core/mmc.c31
-rw-r--r--drivers/mmc/core/mmc_ops.c3
-rw-r--r--drivers/mmc/core/mmc_test.c97
-rw-r--r--drivers/mmc/core/queue.h6
-rw-r--r--drivers/mmc/core/sd.c12
-rw-r--r--drivers/mmc/host/Kconfig34
-rw-r--r--drivers/mmc/host/Makefile13
-rw-r--r--drivers/mmc/host/android-goldfish.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c10
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/cavium-octeon.c13
-rw-r--r--drivers/mmc/host/cavium.c6
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c298
-rw-r--r--drivers/mmc/host/dw_mmc.c62
-rw-r--r--drivers/mmc/host/dw_mmc.h4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c714
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c3
-rw-r--r--drivers/mmc/host/mxcmmc.c34
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c8
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c31
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c287
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c34
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c7
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c39
-rw-r--r--drivers/mmc/host/sdhci-cadence.c88
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h3
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c8
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c177
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c50
-rw-r--r--drivers/mmc/host/sdhci-pic32.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c28
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c30
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c34
-rw-r--r--drivers/mmc/host/sdhci-sirf.c43
-rw-r--r--drivers/mmc/host/sdhci-st.c28
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c34
-rw-r--r--drivers/mmc/host/sdhci-xenon.c121
-rw-r--r--drivers/mmc/host/sdhci-xenon.h2
-rw-r--r--drivers/mmc/host/sdhci.c94
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c95
-rw-r--r--drivers/mmc/host/tmio_mmc.h9
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c32
-rw-r--r--drivers/mmc/host/toshsd.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c8
-rw-r--r--drivers/mtd/devices/docg3.c49
-rw-r--r--drivers/mtd/devices/docg3.h2
-rw-r--r--drivers/mtd/devices/spear_smi.c2
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c20
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/pci.c2
-rw-r--r--drivers/mtd/maps/physmap_of_core.c8
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c16
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/mtdcore.c18
-rw-r--r--drivers/mtd/mtdswap.c21
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c38
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h2
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c2
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c2
-rw-r--r--drivers/mtd/nand/davinci_nand.c2
-rw-r--r--drivers/mtd/nand/denali.c3
-rw-r--r--drivers/mtd/nand/denali.h2
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/fsmc_nand.c2
-rw-r--r--drivers/mtd/nand/gpio.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c11
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c11
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/mtk_ecc.c4
-rw-r--r--drivers/mtd/nand/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c9
-rw-r--r--drivers/mtd/nand/nand_amd.c2
-rw-r--r--drivers/mtd/nand/nand_base.c304
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_bch.c2
-rw-r--r--drivers/mtd/nand/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c6
-rw-r--r--drivers/mtd/nand/nand_ids.c2
-rw-r--r--drivers/mtd/nand/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/nand_micron.c2
-rw-r--r--drivers/mtd/nand/nand_samsung.c2
-rw-r--r--drivers/mtd/nand/nand_timings.c2
-rw-r--r--drivers/mtd/nand/nand_toshiba.c2
-rw-r--r--drivers/mtd/nand/nandsim.c54
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/orion_nand.c9
-rw-r--r--drivers/mtd/nand/oxnas_nand.c27
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/qcom_nandc.c919
-rw-r--r--drivers/mtd/nand/r852.h2
-rw-r--r--drivers/mtd/nand/s3c2410.c2
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sharpsl.c4
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/tango_nand.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nand/vf610_nfc.c11
-rw-r--r--drivers/mtd/nand/xway_nand.c2
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/nftlmount.c2
-rw-r--r--drivers/mtd/ofpart.c23
-rw-r--r--drivers/mtd/spi-nor/Kconfig16
-rw-r--r--drivers/mtd/spi-nor/Makefile3
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c13
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c1
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c82
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c805
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/ubi/block.c6
-rw-r--r--drivers/mtd/ubi/build.c20
-rw-r--r--drivers/mtd/ubi/fastmap.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h4
-rw-r--r--drivers/mux/Makefile5
-rw-r--r--drivers/mux/adg792a.c (renamed from drivers/mux/mux-adg792a.c) 0
-rw-r--r--drivers/mux/core.c (renamed from drivers/mux/mux-core.c) 14
-rw-r--r--drivers/mux/gpio.c (renamed from drivers/mux/mux-gpio.c) 0
-rw-r--r--drivers/mux/mmio.c (renamed from drivers/mux/mux-mmio.c) 0
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/appletalk/ipddp.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/bonding/bond_main.c30
-rw-r--r--drivers/net/bonding/bond_options.c3
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c83
-rw-r--r--drivers/net/dsa/b53/b53_priv.h16
-rw-r--r--drivers/net/dsa/bcm_sf2.c52
-rw-r--r--drivers/net/dsa/bcm_sf2.h13
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c8
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h3
-rw-r--r--drivers/net/dsa/dsa_loop.c42
-rw-r--r--drivers/net/dsa/lan9303-core.c137
-rw-r--r--drivers/net/dsa/lan9303.h11
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c23
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c124
-rw-r--r--drivers/net/dsa/mt7530.c43
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c416
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h146
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c104
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h41
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/qca8k.c112
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c18
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c25
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c207
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c501
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c86
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c97
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c81
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c54
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c352
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h92
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c7
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c31
-rw-r--r--drivers/net/ethernet/apple/mace.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c92
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c13
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c112
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c478
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h500
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c834
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h158
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c513
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c81
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h14
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c256
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c12
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c2
-rwxr-xr-xdrivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c82
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c728
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c352
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c956
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c598
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c153
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c27
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h20
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c192
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c211
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c978
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c456
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/ec_bhf.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c3
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c95
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c118
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c114
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h77
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c118
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c783
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h46
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c63
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h7
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c52
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c20
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig27
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c135
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c300
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h444
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c356
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h740
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4265
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h519
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c213
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c1015
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c2891
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h593
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c493
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/Kconfig19
-rw-r--r--drivers/net/ethernet/huawei/Makefile5
-rw-r--r--drivers/net/ethernet/huawei/hinic/Kconfig12
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.c80
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h64
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c978
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h208
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c946
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h187
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h149
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c1013
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h239
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c886
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h265
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c351
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h272
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c533
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c597
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h153
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c887
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h201
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h214
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c878
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h117
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h368
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1112
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c379
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h198
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c509
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h55
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c504
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h62
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c16
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c9
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c63
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c17
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c214
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c160
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c318
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c134
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c96
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c124
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c75
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c116
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c44
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c57
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c47
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c132
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c102
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c1591
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c183
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h80
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c298
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h422
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c578
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c214
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3226
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h26
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c15
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h8
-rw-r--r--drivers/net/ethernet/neterion/s2io.c45
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c144
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c593
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c243
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h86
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c14
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c145
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c115
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c58
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c157
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c205
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c483
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c49
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig2
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Kconfig12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Makefile10
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c356
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h55
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c271
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h26
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h86
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c106
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c102
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h44
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c174
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h29
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/renesas/ravb.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c131
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c8
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c13
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c232
-rw-r--r--drivers/net/ethernet/sfc/tx.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c152
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c193
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c24
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c90
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c10
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c14
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c48
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/geneve.c322
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h77
-rw-r--r--drivers/net/hyperv/netvsc.c462
-rw-r--r--drivers/net/hyperv/netvsc_drv.c822
-rw-r--r--drivers/net/hyperv/rndis_filter.c274
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/Kconfig74
-rw-r--r--drivers/net/phy/Makefile7
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c7
-rw-r--r--drivers/net/phy/marvell.c320
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c103
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/mdio-i2c.c109
-rw-r--r--drivers/net/phy/mdio-i2c.h19
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c21
-rw-r--r--drivers/net/phy/mdio-mux.c34
-rw-r--r--drivers/net/phy/phy-core.c180
-rw-r--r--drivers/net/phy/phy.c238
-rw-r--r--drivers/net/phy/phy_device.c43
-rw-r--r--drivers/net/phy/phylink.c1462
-rw-r--r--drivers/net/phy/rockchip.c233
-rw-r--r--drivers/net/phy/sfp-bus.c475
-rw-r--r--drivers/net/phy/sfp.c915
-rw-r--r--drivers/net/phy/sfp.h28
-rw-r--r--drivers/net/tap.c11
-rw-r--r--drivers/net/tun.c270
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c9
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc95xx.c11
-rw-r--r--drivers/net/virtio_net.c352
-rw-r--r--drivers/net/vrf.c145
-rw-r--r--drivers/net/vxlan.c162
-rw-r--r--drivers/net/wan/dscc4.c129
-rw-r--r--drivers/net/wan/z85230.c30
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c299
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c150
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c105
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c1106
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h128
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c166
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h271
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c52
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig12
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c84
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c42
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h720
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h3
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c34
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api.h) 78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h) 73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h345
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/led.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h152
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h)33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/paging.h108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h)31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sf.h138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h)15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h208
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tof.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h)9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h)66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c)474
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h)125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/nvm.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c414
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h158
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c310
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2846
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c559
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c413
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h158
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c565
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c22
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c173
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c126
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c121
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c315
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c486
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h27
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c67
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c408
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h202
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c365
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c192
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c43
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h49
-rw-r--r--drivers/net/wireless/rsi/Makefile1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c80
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c368
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c495
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c741
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c146
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c157
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c84
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c138
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h66
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h88
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h258
-rw-r--r--drivers/net/wireless/rsi/rsi_ps.h64
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ntb/ntb_transport.c6
-rw-r--r--drivers/ntb/test/ntb_tool.c2
-rw-r--r--drivers/nvdimm/btt.c201
-rw-r--r--drivers/nvdimm/btt.h11
-rw-r--r--drivers/nvdimm/btt_devs.c4
-rw-r--r--drivers/nvdimm/bus.c27
-rw-r--r--drivers/nvdimm/claim.c9
-rw-r--r--drivers/nvdimm/core.c10
-rw-r--r--drivers/nvdimm/label.c30
-rw-r--r--drivers/nvdimm/namespace_devs.c6
-rw-r--r--drivers/nvdimm/nd.h25
-rw-r--r--drivers/nvdimm/pfn_devs.c53
-rw-r--r--drivers/nvdimm/pmem.c48
-rw-r--r--drivers/nvdimm/pmem.h14
-rw-r--r--drivers/nvdimm/region_devs.c6
-rw-r--r--drivers/nvme/host/core.c359
-rw-r--r--drivers/nvme/host/fabrics.c26
-rw-r--r--drivers/nvme/host/fc.c149
-rw-r--r--drivers/nvme/host/lightnvm.c41
-rw-r--r--drivers/nvme/host/nvme.h30
-rw-r--r--drivers/nvme/host/pci.c110
-rw-r--r--drivers/nvme/host/rdma.c603
-rw-r--r--drivers/nvme/target/admin-cmd.c39
-rw-r--r--drivers/nvme/target/configfs.c2
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fabrics-cmd.c1
-rw-r--r--drivers/nvme/target/fc.c57
-rw-r--r--drivers/nvme/target/fcloop.c3
-rw-r--r--drivers/nvme/target/io-cmd.c6
-rw-r--r--drivers/nvme/target/loop.c1
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c5
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c2
-rw-r--r--drivers/of/address.c24
-rw-r--r--drivers/of/base.c46
-rw-r--r--drivers/of/device.c131
-rw-r--r--drivers/of/dynamic.c33
-rw-r--r--drivers/of/irq.c77
-rw-r--r--drivers/of/of_mdio.c43
-rw-r--r--drivers/of/of_pci.c72
-rw-r--r--drivers/of/overlay.c142
-rw-r--r--drivers/of/platform.c34
-rw-r--r--drivers/of/property.c89
-rw-r--r--drivers/of/unittest-data/Makefile19
-rw-r--r--drivers/of/unittest-data/overlay.dts31
-rw-r--r--drivers/of/unittest-data/overlay_bad_symbol.dts22
-rw-r--r--drivers/of/unittest-data/overlay_base.dts11
-rw-r--r--drivers/of/unittest.c77
-rw-r--r--drivers/parisc/asp.c4
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/ccio-rm-dma.c6
-rw-r--r--drivers/parisc/dino.c6
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/lasi.c4
-rw-r--r--drivers/parisc/lba_pci.c46
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parisc/superio.c4
-rw-r--r--drivers/parisc/wax.c4
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_ax88796.c6
-rw-r--r--drivers/parport/parport_gsc.c10
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c24
-rw-r--r--drivers/pci/dwc/Kconfig12
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c26
-rw-r--r--drivers/pci/dwc/pci-exynos.c12
-rw-r--r--drivers/pci/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/dwc/pci-keystone-dw.c14
-rw-r--r--drivers/pci/dwc/pci-keystone.c10
-rw-r--r--drivers/pci/dwc/pci-keystone.h4
-rw-r--r--drivers/pci/dwc/pci-layerscape.c102
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c12
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c14
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c9
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c17
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c4
-rw-r--r--drivers/pci/dwc/pcie-designware.c14
-rw-r--r--drivers/pci/dwc/pcie-designware.h30
-rw-r--r--drivers/pci/dwc/pcie-hisi.c5
-rw-r--r--drivers/pci/dwc/pcie-kirin.c6
-rw-r--r--drivers/pci/dwc/pcie-qcom.c409
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c8
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c99
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c11
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c59
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c25
-rw-r--r--drivers/pci/host/Kconfig7
-rw-r--r--drivers/pci/host/pci-aardvark.c5
-rw-r--r--drivers/pci/host/pci-ftpci100.c6
-rw-r--r--drivers/pci/host/pci-hyperv.c62
-rw-r--r--drivers/pci/host/pci-mvebu.c11
-rw-r--r--drivers/pci/host/pci-tegra.c9
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/host/pci-xgene.c41
-rw-r--r--drivers/pci/host/pcie-altera-msi.c6
-rw-r--r--drivers/pci/host/pcie-altera.c13
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c8
-rw-r--r--drivers/pci/host/pcie-iproc.c400
-rw-r--r--drivers/pci/host/pcie-iproc.h1
-rw-r--r--drivers/pci/host/pcie-mediatek.c756
-rw-r--r--drivers/pci/host/pcie-rcar.c12
-rw-r--r--drivers/pci/host/pcie-rockchip.c426
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/host/pcie-xilinx.c62
-rw-r--r--drivers/pci/host/vmd.c19
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c8
-rw-r--r--drivers/pci/hotplug/pnv_php.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/msi.c40
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci-sysfs.c21
-rw-r--r--drivers/pci/pci.c67
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c25
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c4
-rw-r--r--drivers/pci/pcie/pcie-dpc.c187
-rw-r--r--drivers/pci/pcie/portdrv_pci.c107
-rw-r--r--drivers/pci/probe.c164
-rw-r--r--drivers/pci/quirks.c160
-rw-r--r--drivers/pci/setup-irq.c32
-rw-r--r--drivers/pci/setup-res.c13
-rw-r--r--drivers/pcmcia/db1xxx_ss.c33
-rw-r--r--drivers/perf/arm_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c74
-rw-r--r--drivers/phy/Kconfig11
-rw-r--r--drivers/phy/Makefile5
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c112
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c8
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/lantiq/Kconfig9
-rw-r--r--drivers/phy/lantiq/Makefile1
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c254
-rw-r--r--drivers/phy/marvell/Kconfig11
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c644
-rw-r--r--drivers/phy/mediatek/Kconfig14
-rw-r--r--drivers/phy/mediatek/Makefile5
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c (renamed from drivers/phy/phy-mt65xx-usb3.c)557
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c162
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c14
-rw-r--r--drivers/phy/ralink/Kconfig11
-rw-r--r--drivers/phy/ralink/Makefile1
-rw-r--r--drivers/phy/ralink/phy-ralink-usb.c249
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c225
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c131
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c5
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c7
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c9
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c70
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c64
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c21
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c31
-rw-r--r--drivers/pinctrl/berlin/berlin.c4
-rw-r--r--drivers/pinctrl/core.c17
-rw-r--r--drivers/pinctrl/core.h6
-rw-r--r--drivers/pinctrl/devicetree.c9
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c131
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h20
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c364
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c25
-rw-r--r--drivers/pinctrl/intel/Kconfig19
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c424
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c302
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c343
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h12
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c47
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c9
-rw-r--r--drivers/pinctrl/pinconf.c14
-rw-r--r--drivers/pinctrl/pinconf.h24
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c79
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c11
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2359
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c6
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c493
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c315
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c10
-rw-r--r--drivers/pinctrl/pinctrl-st.c12
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c8
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c6
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c6
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c34
-rw-r--r--drivers/pinctrl/pinmux.c16
-rw-r--r--drivers/pinctrl/pinmux.h29
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c42
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c432
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c27
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c323
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c32
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c37
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c18
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h15
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig5
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1082
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c146
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c1812
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c11
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h23
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c13
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c10
-rw-r--r--drivers/pinctrl/sprd/Kconfig20
-rw-r--r--drivers/pinctrl/sprd/Makefile2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c972
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c1117
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.h67
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/pinctrl/sunxi/Kconfig2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c273
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c26
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/Kconfig4
-rw-r--r--drivers/pinctrl/uniphier/Makefile1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c279
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c665
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c714
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c453
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c458
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c989
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier.h42
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c8
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c2
-rw-r--r--drivers/platform/x86/alienware-wmi.c40
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/dell-wmi.c69
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c30
-rw-r--r--drivers/platform/x86/ibm_rtl.c4
-rw-r--r--drivers/platform/x86/ideapad-laptop.c69
-rw-r--r--drivers/platform/x86/intel-hid.c21
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c10
-rw-r--r--drivers/platform/x86/intel_oaktrail.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c31
-rw-r--r--drivers/platform/x86/intel_pmc_core.h30
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c1
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c36
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c2
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/platform/x86/mxm-wmi.c4
-rw-r--r--drivers/platform/x86/peaq-wmi.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c38
-rw-r--r--drivers/platform/x86/toshiba-wmi.c2
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/power/avs/rockchip-io-domain.c38
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/supply/Kconfig23
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/act8945a_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c346
-rw-r--r--drivers/power/supply/bq27xxx_battery.c575
-rw-r--r--drivers/power/supply/bq27xxx_battery_hdq.c135
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c16
-rw-r--r--drivers/power/supply/charger-manager.c9
-rw-r--r--drivers/power/supply/ds2780_battery.c4
-rw-r--r--drivers/power/supply/ds2781_battery.c4
-rw-r--r--drivers/power/supply/lp8788-charger.c18
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c156
-rw-r--r--drivers/power/supply/max17042_battery.c42
-rw-r--r--drivers/power/supply/max1721x_battery.c448
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/pcf50633-charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c54
-rw-r--r--drivers/power/supply/sbs-battery.c26
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/power/supply/wm831x_power.c72
-rw-r--r--drivers/pps/Kconfig7
-rw-r--r--drivers/pps/clients/Kconfig7
-rw-r--r--drivers/pps/generators/Kconfig3
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_ixp46x.c2
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/ptp/ptp_pch.c2
-rw-r--r--drivers/pwm/Kconfig23
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-bcm2835.c2
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-mediatek.c78
-rw-r--r--drivers/pwm/pwm-meson.c2
-rw-r--r--drivers/pwm/pwm-pca9685.c14
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c1
-rw-r--r--drivers/pwm/pwm-rockchip.c281
-rw-r--r--drivers/pwm/pwm-samsung.c70
-rw-r--r--drivers/pwm/pwm-stm32-lp.c246
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c90
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c122
-rw-r--r--drivers/pwm/pwm-twl-led.c2
-rw-r--r--drivers/pwm/pwm-twl.c2
-rw-r--r--drivers/pwm/pwm-vt8500.c1
-rw-r--r--drivers/pwm/pwm-zx.c282
-rw-r--r--drivers/regulator/Kconfig29
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c6
-rw-r--r--drivers/regulator/core.c16
-rw-r--r--drivers/regulator/cpcap-regulator.c21
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/fan53555.c15
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/mt6380-regulator.c352
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c11
-rw-r--r--drivers/regulator/pv88090-regulator.h8
-rw-r--r--drivers/regulator/pwm-regulator.c6
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c5
-rw-r--r--drivers/regulator/qcom_smd-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c130
-rw-r--r--drivers/regulator/rn5t618-regulator.c35
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/stm32-vrefbuf.c202
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig10
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c98
-rw-r--r--drivers/remoteproc/imx_rproc.c426
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c3
-rw-r--r--drivers/remoteproc/qcom_adsp_pil.c14
-rw-r--r--drivers/remoteproc/qcom_common.c122
-rw-r--r--drivers/remoteproc/qcom_common.h21
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c6
-rw-r--r--drivers/remoteproc/remoteproc_core.c35
-rw-r--r--drivers/remoteproc/remoteproc_internal.h1
-rw-r--r--drivers/remoteproc/st_remoteproc.c6
-rw-r--r--drivers/reset/Kconfig15
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c238
-rw-r--r--drivers/reset/reset-gemini.c110
-rw-r--r--drivers/reset/reset-hsdk-v1.c137
-rw-r--r--drivers/reset/reset-lantiq.c212
-rw-r--r--drivers/reset/reset-socfpga.c4
-rw-r--r--drivers/reset/reset-sunxi.c4
-rw-r--r--drivers/reset/reset-uniphier.c117
-rw-r--r--drivers/reset/reset-zx2967.c2
-rw-r--r--drivers/rpmsg/Kconfig16
-rw-r--r--drivers/rpmsg/Makefile2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c1612
-rw-r--r--drivers/rpmsg/qcom_glink_native.h45
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c1026
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c316
-rw-r--r--drivers/rpmsg/qcom_smd.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c47
-rw-r--r--drivers/rtc/Kconfig30
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-dev.c20
-rw-r--r--drivers/rtc/rtc-dm355evm.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c458
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-em3027.c2
-rw-r--r--drivers/rtc/rtc-goldfish.c237
-rw-r--r--drivers/rtc/rtc-m41t80.c67
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-mxc.c21
-rw-r--r--drivers/rtc/rtc-puv3.c72
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rtd119x.c242
-rw-r--r--drivers/rtc/rtc-rv3029c2.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c104
-rw-r--r--drivers/rtc/rtc-sa1100.c65
-rw-r--r--drivers/rtc/rtc-sun6i.c34
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c18
-rw-r--r--drivers/s390/block/dasd.c386
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c11
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c202
-rw-r--r--drivers/s390/block/dasd_int.h38
-rw-r--r--drivers/s390/block/dasd_proc.c2
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/block/scm_blk.c13
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Kconfig11
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_early.c6
-rw-r--r--drivers/s390/char/sclp_ocf.c2
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmcp.c112
-rw-r--r--drivers/s390/char/vmcp.h30
-rw-r--r--drivers/s390/cio/chp.c4
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c2
-rw-r--r--drivers/s390/crypto/ap_asm.h9
-rw-r--r--drivers/s390/crypto/ap_bus.c49
-rw-r--r--drivers/s390/crypto/ap_bus.h47
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/zcrypt_card.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c2
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c28
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h17
-rw-r--r--drivers/s390/net/qeth_core_main.c205
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c343
-rw-r--r--drivers/s390/net/qeth_l3_main.c67
-rw-r--r--drivers/s390/net/qeth_l3_sys.c25
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c95
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h25
-rw-r--r--drivers/s390/scsi/zfcp_erp.c5
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c52
-rw-r--r--drivers/s390/scsi/zfcp_fc.h25
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c35
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h12
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h17
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/scsi/53c700.c23
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/NCR5380.c4
-rw-r--r--drivers/scsi/NCR_Q720.c3
-rw-r--r--drivers/scsi/a2091.c17
-rw-r--r--drivers/scsi/a3000.c17
-rw-r--r--drivers/scsi/aacraid/aachba.c334
-rw-r--r--drivers/scsi/aacraid/aacraid.h3
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c3
-rw-r--r--drivers/scsi/aacraid/linit.c320
-rw-r--r--drivers/scsi/aha152x.c13
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx/Makefile6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped1251
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped34
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c5
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped44
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c6
-rw-r--r--drivers/scsi/arm/acornscsi.c11
-rw-r--r--drivers/scsi/arm/cumana_1.c2
-rw-r--r--drivers/scsi/arm/oak.c2
-rw-r--r--drivers/scsi/atari_scsi.c6
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.h2
-rw-r--r--drivers/scsi/bfa/bfad_im.c37
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/ch.c22
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4
-rw-r--r--drivers/scsi/csiostor/csio_hw.h2
-rw-r--r--drivers/scsi/csiostor/csio_init.c23
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c6
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c27
-rw-r--r--drivers/scsi/cxlflash/main.c3
-rw-r--r--drivers/scsi/cxlflash/superpipe.c14
-rw-r--r--drivers/scsi/cxlflash/vlun.c6
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/dpt_i2o.c5
-rw-r--r--drivers/scsi/eata.c9
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/esp_scsi.c53
-rw-r--r--drivers/scsi/esp_scsi.h1
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c4
-rw-r--r--drivers/scsi/fdomain.c6
-rw-r--r--drivers/scsi/fdomain.h2
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/g_NCR5380.c283
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/gvp11.c18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c198
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c605
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c237
-rw-r--r--drivers/scsi/hosts.c8
-rw-r--r--drivers/scsi/hpsa.c117
-rw-r--r--drivers/scsi/hpsa.h81
-rw-r--r--drivers/scsi/hptiop.c11
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/lasi700.c6
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/libsas/Kconfig1
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_expander.c70
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c106
-rw-r--r--drivers/scsi/libsas/sas_internal.h12
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c15
-rw-r--r--drivers/scsi/lpfc/lpfc.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c52
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c93
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c73
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c279
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c13
-rw-r--r--drivers/scsi/mac_esp.c37
-rw-r--r--drivers/scsi/mac_scsi.c4
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c32
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c29
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c112
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c40
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c216
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c85
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c230
-rw-r--r--drivers/scsi/mvme147.c16
-rw-r--r--drivers/scsi/mvsas/mv_init.c12
-rw-r--r--drivers/scsi/mvsas/mv_sas.c6
-rw-r--r--drivers/scsi/nsp32.c22
-rw-r--r--drivers/scsi/osst.c8
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c2
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c7
-rw-r--r--drivers/scsi/pmcraid.c17
-rw-r--r--drivers/scsi/pmcraid.h2
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf.h2
-rw-r--r--drivers/scsi/qedf/qedf_els.c14
-rw-r--r--drivers/scsi/qedf/qedf_fip.c35
-rw-r--r--drivers/scsi/qedf/qedf_main.c113
-rw-r--r--drivers/scsi/qedf/qedf_version.h6
-rw-r--r--drivers/scsi/qedi/qedi.h5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h118
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c78
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c263
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c161
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c155
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qlogicfas.c2
-rw-r--r--drivers/scsi/qlogicfas408.c6
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c144
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_debugfs.c4
-rw-r--r--drivers/scsi/scsi_error.c10
-rw-r--r--drivers/scsi/scsi_ioctl.c4
-rw-r--r--drivers/scsi/scsi_lib.c131
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c33
-rw-r--r--drivers/scsi/scsi_transport_fc.c37
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c6
-rw-r--r--drivers/scsi/scsi_transport_sas.c121
-rw-r--r--drivers/scsi/scsi_transport_srp.c7
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/scsi/sd_zbc.c9
-rw-r--r--drivers/scsi/ses.c64
-rw-r--r--drivers/scsi/sg.c24
-rw-r--r--drivers/scsi/sgiwd93.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h44
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c145
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c9
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c111
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h4
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c13
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/zalon.c8
-rw-r--r--drivers/sfi/sfi_core.c23
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/Kconfig12
-rw-r--r--drivers/soc/amlogic/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c177
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c10
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c8
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c12
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c11
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/imx/gpcv2.c15
-rw-r--r--drivers/soc/lantiq/Makefile2
-rw-r--r--drivers/soc/lantiq/fpi-bus.c87
-rw-r--r--drivers/soc/lantiq/gphy.c260
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c10
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c247
-rw-r--r--drivers/soc/qcom/Kconfig13
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/glink_ssr.c164
-rw-r--r--drivers/soc/qcom/mdt_loader.c5
-rw-r--r--drivers/soc/qcom/smsm.c3
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c1
-rw-r--r--drivers/soc/renesas/Kconfig7
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a77995-sysc.c31
-rw-r--r--drivers/soc/renesas/rcar-rst.c5
-rw-r--r--drivers/soc/renesas/rcar-sysc.c9
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/rockchip/grf.c14
-rw-r--r--drivers/soc/rockchip/pm_domains.c32
-rw-r--r--drivers/soc/samsung/pm_domains.c10
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c57
-rw-r--r--drivers/soc/tegra/Kconfig5
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c56
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/soc/versatile/soc-realview.c2
-rw-r--r--drivers/spi/Kconfig5
-rw-r--r--drivers/spi/spi-altera.c163
-rw-r--r--drivers/spi/spi-ath79.c13
-rw-r--r--drivers/spi/spi-bcm-qspi.c89
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c10
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-cadence.c4
-rw-r--r--drivers/spi/spi-ep93xx.c501
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-imx.c218
-rw-r--r--drivers/spi/spi-loopback-test.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pic32.c4
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c35
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-qup.c564
-rw-r--r--drivers/spi/spi-rockchip.c60
-rw-r--r--drivers/spi/spi-sh-msiof.c32
-rw-r--r--drivers/spi/spi-sh.c4
-rw-r--r--drivers/spi/spi-stm32.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/spi/spi-xlp.c4
-rw-r--r--drivers/spi/spi.c142
-rw-r--r--drivers/spmi/spmi-pmic-arb.c837
-rw-r--r--drivers/spmi/spmi.c14
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/ashmem.c29
-rw-r--r--drivers/staging/android/ion/ion.h12
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c5
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/ccree/Kconfig9
-rw-r--r--drivers/staging/ccree/Makefile2
-rw-r--r--drivers/staging/ccree/cc_hw_queue_defs.h3
-rw-r--r--drivers/staging/ccree/ssi_aead.c246
-rw-r--r--drivers/staging/ccree/ssi_aead.h12
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c473
-rw-r--r--drivers/staging/ccree/ssi_cipher.c185
-rw-r--r--drivers/staging/ccree/ssi_driver.c64
-rw-r--r--drivers/staging/ccree/ssi_driver.h1
-rw-r--r--drivers/staging/ccree/ssi_fips.c119
-rw-r--r--drivers/staging/ccree/ssi_fips.h58
-rw-r--r--drivers/staging/ccree/ssi_fips_data.h306
-rw-r--r--drivers/staging/ccree/ssi_fips_ext.c92
-rw-r--r--drivers/staging/ccree/ssi_fips_ll.c1649
-rw-r--r--drivers/staging/ccree/ssi_fips_local.c357
-rw-r--r--drivers/staging/ccree/ssi_fips_local.h67
-rw-r--r--drivers/staging/ccree/ssi_hash.c276
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c13
-rw-r--r--drivers/staging/ccree/ssi_pm.c4
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c54
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c6
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c80
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c2
-rw-r--r--drivers/staging/comedi/drivers.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c24
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c6
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h4
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c2
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig4
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.c24
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c5
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c6
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c4
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c9
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c36
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c24
-rw-r--r--drivers/staging/greybus/arche-platform.c14
-rw-r--r--drivers/staging/greybus/audio_codec.c2
-rw-r--r--drivers/staging/greybus/gbphy.c2
-rw-r--r--drivers/staging/greybus/interface.c40
-rw-r--r--drivers/staging/greybus/light.c46
-rw-r--r--drivers/staging/greybus/spilib.h3
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c48
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/greybus/vibrator.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c90
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c21
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c4
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c372
-rw-r--r--drivers/staging/irda/TODO4
-rw-r--r--drivers/staging/irda/drivers/Kconfig (renamed from drivers/net/irda/Kconfig)0
-rw-r--r--drivers/staging/irda/drivers/Makefile (renamed from drivers/net/irda/Makefile)2
-rw-r--r--drivers/staging/irda/drivers/act200l-sir.c (renamed from drivers/net/irda/act200l-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/actisys-sir.c (renamed from drivers/net/irda/actisys-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.c (renamed from drivers/net/irda/ali-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.h (renamed from drivers/net/irda/ali-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/au1k_ir.c (renamed from drivers/net/irda/au1k_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.c (renamed from drivers/net/irda/bfin_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.h (renamed from drivers/net/irda/bfin_sir.h)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.c (renamed from drivers/net/irda/donauboe.c)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.h (renamed from drivers/net/irda/donauboe.h)0
-rw-r--r--drivers/staging/irda/drivers/esi-sir.c (renamed from drivers/net/irda/esi-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/girbil-sir.c (renamed from drivers/net/irda/girbil-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irda-usb.c (renamed from drivers/net/irda/irda-usb.c)2
-rw-r--r--drivers/staging/irda/drivers/irda-usb.h (renamed from drivers/net/irda/irda-usb.h)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.c (renamed from drivers/net/irda/irtty-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.h (renamed from drivers/net/irda/irtty-sir.h)0
-rw-r--r--drivers/staging/irda/drivers/kingsun-sir.c (renamed from drivers/net/irda/kingsun-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ks959-sir.c (renamed from drivers/net/irda/ks959-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ksdazzle-sir.c (renamed from drivers/net/irda/ksdazzle-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/litelink-sir.c (renamed from drivers/net/irda/litelink-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ma600-sir.c (renamed from drivers/net/irda/ma600-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcp2120-sir.c (renamed from drivers/net/irda/mcp2120-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcs7780.c (renamed from drivers/net/irda/mcs7780.c)2
-rw-r--r--drivers/staging/irda/drivers/mcs7780.h (renamed from drivers/net/irda/mcs7780.h)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.c (renamed from drivers/net/irda/nsc-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.h (renamed from drivers/net/irda/nsc-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/old_belkin-sir.c (renamed from drivers/net/irda/old_belkin-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/pxaficp_ir.c (renamed from drivers/net/irda/pxaficp_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sa1100_ir.c (renamed from drivers/net/irda/sa1100_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sh_sir.c (renamed from drivers/net/irda/sh_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/sir-dev.h (renamed from drivers/net/irda/sir-dev.h)0
-rw-r--r--drivers/staging/irda/drivers/sir_dev.c (renamed from drivers/net/irda/sir_dev.c)0
-rw-r--r--drivers/staging/irda/drivers/sir_dongle.c (renamed from drivers/net/irda/sir_dongle.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.c (renamed from drivers/net/irda/smsc-ircc2.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.h (renamed from drivers/net/irda/smsc-ircc2.h)0
-rw-r--r--drivers/staging/irda/drivers/smsc-sio.h (renamed from drivers/net/irda/smsc-sio.h)0
-rw-r--r--drivers/staging/irda/drivers/stir4200.c (renamed from drivers/net/irda/stir4200.c)2
-rw-r--r--drivers/staging/irda/drivers/tekram-sir.c (renamed from drivers/net/irda/tekram-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/toim3232-sir.c (renamed from drivers/net/irda/toim3232-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.c (renamed from drivers/net/irda/via-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.h (renamed from drivers/net/irda/via-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.c (renamed from drivers/net/irda/vlsi_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.h (renamed from drivers/net/irda/vlsi_ir.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af.h (renamed from drivers/net/irda/w83977af.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.c (renamed from drivers/net/irda/w83977af_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.h (renamed from drivers/net/irda/w83977af_ir.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/af_irda.h87
-rw-r--r--drivers/staging/irda/include/net/irda/crc.h29
-rw-r--r--drivers/staging/irda/include/net/irda/discovery.h95
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_core.h106
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_event.h83
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_lmp.h36
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_param.h147
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_ttp.h37
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty.h121
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty_attach.h92
-rw-r--r--drivers/staging/irda/include/net/irda/irda.h115
-rw-r--r--drivers/staging/irda/include/net/irda/irda_device.h285
-rw-r--r--drivers/staging/irda/include/net/irda/iriap.h108
-rw-r--r--drivers/staging/irda/include/net/irda/iriap_event.h85
-rw-r--r--drivers/staging/irda/include/net/irda/irias_object.h108
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_client.h42
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_common.h230
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_eth.h32
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_event.h81
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_filter.h35
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_provider.h52
-rw-r--r--drivers/staging/irda/include/net/irda/irlap.h311
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_event.h129
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_frame.h167
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp.h295
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_event.h98
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_frame.h62
-rw-r--r--drivers/staging/irda/include/net/irda/irmod.h109
-rw-r--r--drivers/staging/irda/include/net/irda/irqueue.h96
-rw-r--r--drivers/staging/irda/include/net/irda/irttp.h210
-rw-r--r--drivers/staging/irda/include/net/irda/parameters.h100
-rw-r--r--drivers/staging/irda/include/net/irda/qos.h101
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h105
-rw-r--r--drivers/staging/irda/include/net/irda/wrapper.h58
-rw-r--r--drivers/staging/irda/net/Kconfig96
-rw-r--r--drivers/staging/irda/net/Makefile17
-rw-r--r--drivers/staging/irda/net/af_irda.c2695
-rw-r--r--drivers/staging/irda/net/discovery.c417
-rw-r--r--drivers/staging/irda/net/ircomm/Kconfig12
-rw-r--r--drivers/staging/irda/net/ircomm/Makefile8
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_core.c563
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_event.c246
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_lmp.c350
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_param.c501
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_ttp.c350
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty.c1329
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_attach.c987
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c291
-rw-r--r--drivers/staging/irda/net/irda_device.c316
-rw-r--r--drivers/staging/irda/net/iriap.c1085
-rw-r--r--drivers/staging/irda/net/iriap_event.c496
-rw-r--r--drivers/staging/irda/net/irias_object.c555
-rw-r--r--drivers/staging/irda/net/irlan/Kconfig14
-rw-r--r--drivers/staging/irda/net/irlan/Makefile7
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client.c559
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client_event.c511
-rw-r--r--drivers/staging/irda/net/irlan/irlan_common.c1176
-rw-r--r--drivers/staging/irda/net/irlan/irlan_eth.c340
-rw-r--r--drivers/staging/irda/net/irlan/irlan_event.c60
-rw-r--r--drivers/staging/irda/net/irlan/irlan_filter.c240
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider.c408
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider_event.c233
-rw-r--r--drivers/staging/irda/net/irlap.c1207
-rw-r--r--drivers/staging/irda/net/irlap_event.c2316
-rw-r--r--drivers/staging/irda/net/irlap_frame.c1407
-rw-r--r--drivers/staging/irda/net/irlmp.c1996
-rw-r--r--drivers/staging/irda/net/irlmp_event.c886
-rw-r--r--drivers/staging/irda/net/irlmp_frame.c476
-rw-r--r--drivers/staging/irda/net/irmod.c199
-rw-r--r--drivers/staging/irda/net/irnet/Kconfig13
-rw-r--r--drivers/staging/irda/net/irnet/Makefile7
-rw-r--r--drivers/staging/irda/net/irnet/irnet.h522
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.c1885
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.h178
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c1189
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h116
-rw-r--r--drivers/staging/irda/net/irnetlink.c162
-rw-r--r--drivers/staging/irda/net/irproc.c96
-rw-r--r--drivers/staging/irda/net/irqueue.c911
-rw-r--r--drivers/staging/irda/net/irsysctl.c258
-rw-r--r--drivers/staging/irda/net/irttp.c1891
-rw-r--r--drivers/staging/irda/net/parameters.c584
-rw-r--r--drivers/staging/irda/net/qos.c771
-rw-r--r--drivers/staging/irda/net/timer.c231
-rw-r--r--drivers/staging/irda/net/wrapper.c492
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c4
-rw-r--r--drivers/staging/ks7010/ks_hostif.c4
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h28
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h104
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h26
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h15
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h50
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h149
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h (renamed from drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h (renamed from drivers/staging/lustre/include/linux/lnet/lib-dlc.h)4
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h (renamed from drivers/staging/lustre/include/linux/lnet/types.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetctl.h)51
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetst.h)129
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h (renamed from drivers/staging/lustre/include/linux/lnet/nidstr.h)2
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h (renamed from drivers/staging/lustre/include/linux/lnet/lnet.h)24
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h (renamed from drivers/staging/lustre/lustre/include/lustre_cfg.h)188
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h293
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h (renamed from drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_idl.h)689
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h)203
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h (renamed from drivers/staging/lustre/lustre/include/uapi_kernelcomm.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h236
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h94
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_user.h)15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h (renamed from drivers/staging/lustre/lustre/include/lustre_ver.h)0
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h18
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c49
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c12
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c5
-rw-r--r--drivers/staging/lustre/lnet/selftest/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c9
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h9
-rw-r--r--drivers/staging/lustre/lustre/Kconfig10
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c5
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c19
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c14
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h4
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h10
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h23
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h25
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_errno.h)0
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h84
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h39
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h109
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h8
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h20
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h6
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h44
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h6
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c46
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c17
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c35
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h20
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c31
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c23
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c15
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c22
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c184
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h96
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c87
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h31
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c152
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c43
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c469
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_io.c51
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c10
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c26
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c25
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c4
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h11
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c39
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c75
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c102
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c29
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c74
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c44
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c33
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c31
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c9
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c13
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c24
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c7
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h6
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c14
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c48
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h29
-rw-r--r--drivers/staging/media/atomisp/i2c/lm3554.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.c12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.c19
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h3
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c139
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c3
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c21
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/imx/Kconfig3
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c57
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c37
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/media/imx/imx-media-of.c50
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c37
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c18
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c10
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c22
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c2
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon/ethernet-rx.c79
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts53
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433.txt62
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt274
-rw-r--r--drivers/staging/pi433/Kconfig16
-rw-r--r--drivers/staging/pi433/Makefile3
-rw-r--r--drivers/staging/pi433/TODO5
-rw-r--r--drivers/staging/pi433/pi433_if.c1338
-rw-r--r--drivers/staging/pi433/pi433_if.h152
-rw-r--r--drivers/staging/pi433/rf69.c1032
-rw-r--r--drivers/staging/pi433/rf69.h82
-rw-r--r--drivers/staging/pi433/rf69_enum.h201
-rw-r--r--drivers/staging/pi433/rf69_registers.h489
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c78
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c23
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h11
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c38
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c46
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c20
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1008
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtlwifi/Kconfig22
-rw-r--r--drivers/staging/rtlwifi/Makefile70
-rw-r--r--drivers/staging/rtlwifi/TODO11
-rw-r--r--drivers/staging/rtlwifi/base.c2826
-rw-r--r--drivers/staging/rtlwifi/base.h186
-rw-r--r--drivers/staging/rtlwifi/btcoexist/Makefile8
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c5244
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h444
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c5225
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h498
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c65
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h35
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c1881
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h802
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c528
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h75
-rw-r--r--drivers/staging/rtlwifi/cam.c326
-rw-r--r--drivers/staging/rtlwifi/cam.h50
-rw-r--r--drivers/staging/rtlwifi/core.c2046
-rw-r--r--drivers/staging/rtlwifi/core.h86
-rw-r--r--drivers/staging/rtlwifi/debug.c636
-rw-r--r--drivers/staging/rtlwifi/debug.h234
-rw-r--r--drivers/staging/rtlwifi/efuse.c1342
-rw-r--r--drivers/staging/rtlwifi/efuse.h120
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h52
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c106
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c563
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h40
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c343
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h44
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c323
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h53
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h42
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c185
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h45
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c414
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h38
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h171
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c5979
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h396
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c329
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h71
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c974
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h84
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c554
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h73
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c4494
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h321
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c426
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h82
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13407
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h12103
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h122
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h515
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h115
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h54
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h403
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h1011
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h116
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h1132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h728
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h133
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h62
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h123
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h444
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h506
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h1934
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c1384
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h94
-rw-r--r--drivers/staging/rtlwifi/pci.c2508
-rw-r--r--drivers/staging/rtlwifi/pci.h329
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c965
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h24
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c1986
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h946
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c200
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h57
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c941
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h119
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c628
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h96
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h301
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c457
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c343
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h60
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c2910
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h175
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h59
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c1535
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h241
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c129
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h50
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c102
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h64
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c139
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h44
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_features.h33
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c1928
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h510
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c341
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h205
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h76
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c228
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h42
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c330
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h46
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c644
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h293
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h613
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c422
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c1208
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h269
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h151
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h94
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h213
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h130
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c1969
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c222
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c4744
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h129
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c351
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c1815
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h84
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c1410
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c168
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c225
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h30
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c874
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h39
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h36
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h41
-rw-r--r--drivers/staging/rtlwifi/ps.c1007
-rw-r--r--drivers/staging/rtlwifi/ps.h50
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h94
-rw-r--r--drivers/staging/rtlwifi/rc.c322
-rw-r--r--drivers/staging/rtlwifi/rc.h49
-rw-r--r--drivers/staging/rtlwifi/regd.c469
-rw-r--r--drivers/staging/rtlwifi/regd.h63
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/Makefile7
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h82
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c968
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h198
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2441
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h66
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c127
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h34
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c2233
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h145
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h1653
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c481
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h32
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c1015
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h165
-rw-r--r--drivers/staging/rtlwifi/stats.c260
-rw-r--r--drivers/staging/rtlwifi/stats.h42
-rw-r--r--drivers/staging/rtlwifi/wifi.h3375
-rw-r--r--drivers/staging/rts5208/ms.c5
-rw-r--r--drivers/staging/rts5208/rtsx.c15
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c4
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c2
-rw-r--r--drivers/staging/rts5208/sd.c25
-rw-r--r--drivers/staging/rts5208/spi.c8
-rw-r--r--drivers/staging/rts5208/xd.c17
-rw-r--r--drivers/staging/skein/skein_block.c323
-rw-r--r--drivers/staging/skein/skein_block.h323
-rw-r--r--drivers/staging/speakup/spk_ttyio.c77
-rw-r--r--drivers/staging/typec/fusb302/Kconfig2
-rw-r--r--drivers/staging/typec/fusb302/TODO4
-rw-r--r--drivers/staging/typec/fusb302/fusb302.c144
-rw-r--r--drivers/staging/typec/pd.h2
-rw-r--r--drivers/staging/typec/tcpm.c471
-rw-r--r--drivers/staging/typec/tcpm.h12
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt14
-rw-r--r--drivers/staging/unisys/include/channel.h361
-rw-r--r--drivers/staging/unisys/include/iochannel.h554
-rw-r--r--drivers/staging/unisys/include/visorbus.h44
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h715
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h96
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c469
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h38
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c219
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c607
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h54
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c556
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h68
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c138
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c736
-rw-r--r--drivers/staging/vboxvideo/Kconfig11
-rw-r--r--drivers/staging/vboxvideo/TODO4
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c7
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c176
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c8
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c12
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c8
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c2
-rw-r--r--drivers/staging/vt6655/card.c6
-rw-r--r--drivers/staging/vt6655/mac.c2
-rw-r--r--drivers/staging/vt6656/device.h2
-rw-r--r--drivers/staging/vt6656/firmware.c2
-rw-r--r--drivers/staging/vt6656/key.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c5
-rw-r--r--drivers/staging/vt6656/power.c6
-rw-r--r--drivers/staging/vt6656/rf.c6
-rw-r--r--drivers/staging/vt6656/usbpipe.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c5
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c65
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h100
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c16
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c27
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c37
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_pr.c3
-rw-r--r--drivers/tee/optee/core.c19
-rw-r--r--drivers/tee/optee/optee_smc.h12
-rw-r--r--drivers/tee/optee/rpc.c15
-rw-r--r--drivers/tee/tee_core.c5
-rw-r--r--drivers/tee/tee_shm.c2
-rw-r--r--drivers/thermal/Kconfig12
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c2
-rw-r--r--drivers/thermal/hisi_thermal.c2
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c2
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.h8
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c43
-rw-r--r--drivers/thermal/int340x_thermal/int3406_thermal.c96
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c2
-rw-r--r--drivers/thermal/intel_pch_thermal.c12
-rw-r--r--drivers/thermal/mtk_thermal.c88
-rw-r--r--drivers/thermal/qoriq_thermal.c2
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c65
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/thermal_core.c31
-rw-r--r--drivers/thermal/thermal_core.h1
-rw-r--r--drivers/thermal/thermal_sysfs.c29
-rw-r--r--drivers/thermal/uniphier_thermal.c384
-rw-r--r--drivers/thermal/zx2967_thermal.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/icm.c13
-rw-r--r--drivers/thunderbolt/switch.c20
-rw-r--r--drivers/thunderbolt/tb.c4
-rw-r--r--drivers/tty/Kconfig8
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/ehv_bytechan.c2
-rw-r--r--drivers/tty/goldfish.c234
-rw-r--r--drivers/tty/hvc/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_opal.c16
-rw-r--r--drivers/tty/hvc/hvc_vio.c20
-rw-r--r--drivers/tty/hvc/hvcs.c4
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c2
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/pty.c72
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/21285.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c7
-rw-r--r--drivers/tty/serial/8250/8250_core.c16
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c8
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c8
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c8
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c118
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c5
-rw-r--r--drivers/tty/serial/8250/8250_of.c60
-rw-r--r--drivers/tty/serial/8250/8250_pci.c43
-rw-r--r--drivers/tty/serial/8250/8250_port.c81
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c63
-rw-r--r--drivers/tty/serial/8250/Kconfig11
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c6
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/arc_uart.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/earlycon.c7
-rw-r--r--drivers/tty/serial/fsl_lpuart.c76
-rw-r--r--drivers/tty/serial/imx.c20
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c2
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c14
-rw-r--r--drivers/tty/serial/msm_serial.c19
-rw-r--r--drivers/tty/serial/mux.c16
-rw-r--r--drivers/tty/serial/omap-serial.c13
-rw-r--r--drivers/tty/serial/owl-uart.c635
-rw-r--r--drivers/tty/serial/pch_uart.c40
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c47
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/serial/sprd_serial.c8
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c125
-rw-r--r--drivers/tty/serial/stm32-usart.h37
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/synclink.c2
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/tty_buffer.c26
-rw-r--r--drivers/tty/tty_io.c115
-rw-r--r--drivers/tty/vcc.c1155
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/atm/speedtch.c6
-rw-r--r--drivers/usb/atm/ueagle-atm.c4
-rw-r--r--drivers/usb/atm/usbatm.c6
-rw-r--r--drivers/usb/atm/xusbatm.c1
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c2
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c155
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c2
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/otg_fsm.c2
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/common/common.c15
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/usb/core/ledtrig-usbport.c5
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/core/sysfs.c2
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c22
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c4
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/function/f_fs.c7
-rw-r--r--drivers/usb/gadget/function/f_hid.c17
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c21
-rw-r--r--drivers/usb/gadget/function/f_midi.c68
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c20
-rw-r--r--drivers/usb/gadget/function/u_audio.c4
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/u_ether.h1
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h35
-rw-r--r--drivers/usb/gadget/function/u_rndis.h4
-rw-r--r--drivers/usb/gadget/function/u_serial.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h24
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c148
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c16
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c7
-rw-r--r--drivers/usb/gadget/udc/core.c20
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c117
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c6
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-omap.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c4
-rw-r--r--drivers/usb/host/hwa-hc.c4
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-sm501.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c9
-rw-r--r--drivers/usb/host/pci-quirks.c35
-rw-r--r--drivers/usb/host/r8a66597-hcd.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/whci/hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c127
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-plat.c10
-rw-r--r--drivers/usb/host/xhci-rcar.c40
-rw-r--r--drivers/usb/host/xhci-ring.c22
-rw-r--r--drivers/usb/host/xhci-trace.h23
-rw-r--r--drivers/usb/host/xhci.h90
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/chaoskey.c2
-rw-r--r--drivers/usb/misc/cytherm.c1
-rw-r--r--drivers/usb/misc/ftdi-elan.c33
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/legousbtower.c2
-rw-r--r--drivers/usb/misc/lvstest.c41
-rw-r--r--drivers/usb/misc/rio500.c4
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c13
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usb251xb.c1
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/misc/uss720.c7
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c58
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c23
-rw-r--r--drivers/usb/mtu3/mtu3_host.c4
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h4
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c1
-rw-r--r--drivers/usb/musb/musb_core.c22
-rw-r--r--drivers/usb/musb/musb_core.h24
-rw-r--r--drivers/usb/musb/musb_dsps.c13
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c4
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c9
-rw-r--r--drivers/usb/phy/phy-tahvo.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/phy/phy.c276
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c2
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/storage/realtek_cr.c1
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/usbip/stub_main.c2
-rw-r--r--drivers/usb/usbip/usbip_common.c2
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c4
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c6
-rw-r--r--drivers/usb/wusbcore/cbaf.c2
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c2
-rw-r--r--drivers/usb/wusbcore/wusbhc.c2
-rw-r--r--drivers/uwb/lc-rc.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c2
-rw-r--r--drivers/vfio/vfio.c25
-rw-r--r--drivers/vfio/vfio_iommu_type1.c16
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/backlight/gpio_backlight.c62
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/lm3630a_bl.c5
-rw-r--r--drivers/video/backlight/pandora_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c2
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/Makefile8
-rw-r--r--drivers/video/console/sticore.c11
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/fbdev/68328fb.c2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/arkfb.c2
-rw-r--r--drivers/video/fbdev/asiliantfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c6
-rw-r--r--drivers/video/fbdev/bfin-lq035q1-fb.c2
-rw-r--r--drivers/video/fbdev/bw2.c4
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c4
-rw-r--r--drivers/video/fbdev/cg6.c4
-rw-r--r--drivers/video/fbdev/chipsfb.c4
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbdev/core/Makefile14
-rw-r--r--drivers/video/fbdev/core/bitblit.c (renamed from drivers/video/console/bitblit.c)8
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c (renamed from drivers/video/console/fbcon.c)37
-rw-r--r--drivers/video/fbdev/core/fbcon.h (renamed from drivers/video/console/fbcon.h)9
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c (renamed from drivers/video/console/fbcon_ccw.c)8
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c (renamed from drivers/video/console/fbcon_cw.c)8
-rw-r--r--drivers/video/fbdev/core/fbcon_dmi_quirks.c145
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c (renamed from drivers/video/console/fbcon_rotate.c)4
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h (renamed from drivers/video/console/fbcon_rotate.h)0
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c (renamed from drivers/video/console/fbcon_ud.c)8
-rw-r--r--drivers/video/fbdev/core/fbmem.c24
-rw-r--r--drivers/video/fbdev/core/fbmon.c4
-rw-r--r--drivers/video/fbdev/core/softcursor.c (renamed from drivers/video/console/softcursor.c)4
-rw-r--r--drivers/video/fbdev/core/tileblit.c (renamed from drivers/video/console/tileblit.c)7
-rw-r--r--drivers/video/fbdev/cyber2000fb.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c2
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/efifb.c31
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/ffb.c4
-rw-r--r--drivers/video/fbdev/fm2fb.c2
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c2
-rw-r--r--drivers/video/fbdev/grvga.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c4
-rw-r--r--drivers/video/fbdev/imsttfb.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/leo.c4
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c14
-rw-r--r--drivers/video/fbdev/maxinefb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/neofb.c2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/offb.c10
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/p9100.c4
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c2
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c4
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c4
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c4
-rw-r--r--drivers/video/fbdev/ps3fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c2
-rw-r--r--drivers/video/fbdev/sis/init301.c15
-rw-r--r--drivers/video/fbdev/skeletonfb.c4
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c17
-rw-r--r--drivers/video/fbdev/smscufx.c2
-rw-r--r--drivers/video/fbdev/sunxvr1000.c10
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c4
-rw-r--r--drivers/video/fbdev/tdfxfb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c2
-rw-r--r--drivers/video/fbdev/udlfb.c5
-rw-r--r--drivers/video/fbdev/uvesafb.c9
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/via/via-core.c2
-rw-r--r--drivers/video/fbdev/vt8623fb.c4
-rw-r--r--drivers/video/fbdev/xilinxfb.c62
-rw-r--r--drivers/video/of_display_timing.c41
-rw-r--r--drivers/video/of_videomode.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c12
-rw-r--r--drivers/virtio/Kconfig4
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/virtio/virtio_ring.c7
-rw-r--r--drivers/w1/masters/ds1wm.c108
-rw-r--r--drivers/w1/masters/ds2482.c12
-rw-r--r--drivers/w1/masters/ds2490.c2
-rw-r--r--drivers/w1/slaves/Kconfig14
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_bq27000.c117
-rw-r--r--drivers/w1/slaves/w1_ds2438.c9
-rw-r--r--drivers/w1/slaves/w1_ds2805.c313
-rw-r--r--drivers/w1/slaves/w1_therm.c164
-rw-r--r--drivers/w1/w1.c22
-rw-r--r--drivers/watchdog/asm9260_wdt.c4
-rw-r--r--drivers/watchdog/aspeed_wdt.c132
-rw-r--r--drivers/watchdog/bcm7038_wdt.c4
-rw-r--r--drivers/watchdog/cadence_wdt.c6
-rw-r--r--drivers/watchdog/coh901327_wdt.c2
-rw-r--r--drivers/watchdog/da9063_wdt.c67
-rw-r--r--drivers/watchdog/diag288_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c22
-rw-r--r--drivers/watchdog/it87_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c74
-rw-r--r--drivers/watchdog/max77620_wdt.c2
-rw-r--r--drivers/watchdog/mei_wdt.c2
-rw-r--r--drivers/watchdog/meson_wdt.c2
-rw-r--r--drivers/watchdog/mt7621_wdt.c4
-rw-r--r--drivers/watchdog/octeon-wdt-main.c354
-rw-r--r--drivers/watchdog/octeon-wdt-nmi.S42
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c83
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c80
-rw-r--r--drivers/watchdog/rt2880_wdt.c4
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/stm32_iwdg.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/watchdog/zx2967_wdt.c2
-rw-r--r--drivers/xen/Kconfig12
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c8
-rw-r--r--drivers/xen/biomerge.c3
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/events/events_fifo.c7
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/pvcalls-back.c1240
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c44
5737 files changed, 424606 insertions, 133309 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index dfdcda00bfe3..d90fdc413648 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -125,7 +125,6 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_EISA) += eisa/
-obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b1aacfc62b1f..90265ab4437a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
+acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index c1c4877ca96c..2cd9f738812b 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -25,7 +25,7 @@
* @raw: the raw value, used as a key to get the temerature from the
* above mapping table
*
- * A positive converted temperarure value will be returned on success,
+ * A positive converted temperature value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
@@ -55,11 +55,11 @@ EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
* acpi_lpat_temp_to_raw(): Return raw value from temperature through
* LPAT conversion table
*
- * @lpat: the temperature_raw mapping table
+ * @lpat_table: the temperature_raw mapping table
* @temp: the temperature, used as a key to get the raw value from the
* above mapping table
*
- * A positive converted temperature value will be returned on success,
+ * The raw value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f88caf5aab76..032ae44710e5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -465,7 +465,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
- ret = -ENOMEM;
+ /* Skip the device, but continue the namespace scan. */
+ ret = 0;
goto err_out;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f098e25b6b41..86c10599d9f8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -670,7 +670,7 @@ err:
}
-void __init acpi_processor_check_duplicates(void)
+static void __init acpi_processor_check_duplicates(void)
{
/* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index e88fe3632dd6..0972ec0e2eb8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -418,7 +418,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id video_dmi_table[] = {
+static const struct dmi_system_id video_dmi_table[] = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
*/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index b125bdd3d58b..1709551bc4aa 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -18,6 +18,7 @@ acpi-y := \
dsmthdat.o \
dsobject.o \
dsopcode.o \
+ dspkginit.o \
dsutils.o \
dswexec.o \
dswload.o \
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index bb6a84b0b4b3..7a1a68b5ac5c 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -114,6 +114,8 @@ ac_get_all_tables_from_file(char *filename,
u8 get_only_aml_tables,
struct acpi_new_table_desc **return_list_head);
+void ac_delete_table_list(struct acpi_new_table_desc *list_head);
+
u8 ac_is_file_binary(FILE * file);
acpi_status ac_validate_table_header(FILE * file, long table_offset);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 0d95c85cce06..f8f3a6e74128 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -237,6 +237,11 @@ acpi_ds_initialize_objects(u32 table_index,
* dsobject - Parser/Interpreter interface - object initialization and conversion
*/
acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr);
+
+acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
@@ -259,6 +264,14 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
+ * dspkginit - Package object initialization
+ */
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context);
+
+/*
* dsutils - Parser/Interpreter interface utility routines
*/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 8ddd3b20e0c6..0d45b8bb1678 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -199,6 +199,7 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
@@ -604,7 +605,7 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON u16 index;
+ ACPI_STATE_COMMON u32 index;
union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
@@ -867,7 +868,7 @@ struct acpi_parse_obj_named {
/* This version is used by the iASL compiler only */
-#define ACPI_MAX_PARSEOP_NAME 20
+#define ACPI_MAX_PARSEOP_NAME 20
struct acpi_parse_obj_asl {
ACPI_PARSE_COMMON union acpi_parse_object *child;
@@ -907,7 +908,7 @@ union acpi_parse_object {
struct asl_comment_state {
u8 comment_type;
u32 spaces_before;
- union acpi_parse_object *latest_parse_node;
+ union acpi_parse_object *latest_parse_op;
union acpi_parse_object *parsing_paren_brace_node;
u8 capture_comments;
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 27c3f982d810..5226146190bf 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -122,7 +122,9 @@ struct acpi_object_integer {
_type *pointer; \
u32 length;
-struct acpi_object_string { /* Null terminated, ASCII characters only */
+/* Null terminated, ASCII characters only */
+
+struct acpi_object_string {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
@@ -211,7 +213,9 @@ struct acpi_object_method {
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
union acpi_operand_object *handler; /* Handler for Address space */
-struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+
+struct acpi_object_notify_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
struct acpi_object_device {
@@ -258,7 +262,9 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u8 access_length; /* For serial regions/fields */
-struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+
+struct acpi_object_field_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
@@ -333,11 +339,12 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
- u8 reserved;
+ u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node; /* ref_of or Namepath */
union acpi_operand_object **where; /* Target of Index */
u8 *index_pointer; /* Used for Buffers and Strings */
+ u8 *aml; /* Used for deferred resolution of the ref */
u32 value; /* Used for Local/Arg/Index/ddb_handle */
};
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c8da453bd960..84a3ceb6e384 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -76,7 +76,8 @@ void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc);
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature);
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index);
u8 acpi_tb_is_table_loaded(u32 table_index);
@@ -132,6 +133,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
acpi_status acpi_tb_unload_table(u32 table_index);
+void acpi_tb_notify_table(u32 event, void *table);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2a3cc4296481..745134ade35f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -516,7 +516,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index);
+ u32 index);
acpi_status
acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
@@ -538,6 +538,13 @@ acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder);
+acpi_status
+acpi_ut_short_multiply(u64 in_multiplicand, u32 multiplier, u64 *outproduct);
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result);
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result);
+
/*
* utmisc
*/
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 46bf270ac525..5a606eac0c22 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -310,7 +310,7 @@ dump_node:
}
else {
- acpi_os_printf("Object (%p) Pathname: %s\n",
+ acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n",
node, (char *)ret_buf.pointer);
}
@@ -326,7 +326,7 @@ dump_node:
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
- acpi_os_printf("\nAttached Object (%p):\n", obj_desc);
+ acpi_os_printf("\nAttached Object %p:", obj_desc);
if (!acpi_os_readable
(obj_desc, sizeof(union acpi_operand_object))) {
acpi_os_printf
@@ -335,9 +335,36 @@ dump_node:
return;
}
- acpi_ut_debug_dump_buffer((void *)obj_desc,
- sizeof(union acpi_operand_object),
- display, ACPI_UINT32_MAX);
+ if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *)
+ obj_desc)) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" Namespace Node - ");
+ status =
+ acpi_get_name((struct acpi_namespace_node *)
+ obj_desc,
+ ACPI_FULL_PATHNAME_NO_TRAILING,
+ &ret_buf);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf
+ ("Could not convert name to pathname\n");
+ } else {
+ acpi_os_printf("Pathname: %s",
+ (char *)ret_buf.pointer);
+ }
+
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(struct
+ acpi_namespace_node),
+ display, ACPI_UINT32_MAX);
+ } else {
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(union
+ acpi_operand_object),
+ display, ACPI_UINT32_MAX);
+ }
+
acpi_ex_dump_object_descriptor(obj_desc, 1);
}
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c5dccc54307d..7bcf5f5ea029 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -184,6 +184,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
/* Execute flag should always be set when this function is entered */
if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ ACPI_ERROR((AE_INFO, "Parse execute mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -556,6 +557,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
return_ACPI_STATUS(AE_OK);
}
+ ACPI_ERROR((AE_INFO, "Parse deferred mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 7df3152ed856..82448551781b 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -52,12 +52,6 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
-/* Local prototypes */
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- union acpi_operand_object **obj_desc_ptr);
-
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
@@ -73,15 +67,13 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* Simple objects are any objects other than a package object!
*
******************************************************************************/
-
-static acpi_status
+acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@@ -89,140 +81,47 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
- * previously looked up in the namespace, it was stored in this op.
- * Otherwise, go ahead and look it up now
+ * previously looked up in the namespace, it was stored in
+ * this op. Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
- status = acpi_ns_lookup(walk_state->scope_info,
- op->common.value.string,
- ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT |
- ACPI_NS_DONT_OPEN_SCOPE, NULL,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_namespace_node,
- &(op->
- common.
- node)));
- if (ACPI_FAILURE(status)) {
-
- /* Check if we are resolving a named reference within a package */
-
- if ((status == AE_NOT_FOUND)
- && (acpi_gbl_enable_interpreter_slack)
- &&
- ((op->common.parent->common.aml_opcode ==
- AML_PACKAGE_OP)
- || (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP))) {
- /*
- * We didn't find the target and we are populating elements
- * of a package - ignore if slack enabled. Some ASL code
- * contains dangling invalid references in packages and
- * expects that no exception will be issued. Leave the
- * element as a null element. It cannot be used, but it
- * can be overwritten by subsequent ASL code - this is
- * typically the case.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Ignoring unresolved reference in package [%4.4s]\n",
- walk_state->
- scope_info->scope.
- node->name.ascii));
-
- return_ACPI_STATUS(AE_OK);
- } else {
- ACPI_ERROR_NAMESPACE(op->common.value.
- string, status);
- }
-
- return_ACPI_STATUS(status);
- }
- }
-
- /* Special object resolution for elements of a package */
-
- if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP)) {
- /*
- * Attempt to resolve the node to a value before we insert it into
- * the package. If this is a reference to a common data type,
- * resolve it immediately. According to the ACPI spec, package
- * elements can only be "data objects" or method references.
- * Attempt to resolve to an Integer, Buffer, String or Package.
- * If cannot, return the named reference (for things like Devices,
- * Methods, etc.) Buffer Fields and Fields will resolve to simple
- * objects (int/buf/str/pkg).
- *
- * NOTE: References to things like Devices, Methods, Mutexes, etc.
- * will remain as named references. This behavior is not described
- * in the ACPI spec, but it appears to be an oversight.
- */
- obj_desc =
- ACPI_CAST_PTR(union acpi_operand_object,
- op->common.node);
-
- status =
- acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &obj_desc),
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Special handling for Alias objects. We need to setup the type
- * and the Op->Common.Node to point to the Alias target. Note,
- * Alias has at most one level of indirection internally.
- */
- type = op->common.node->type;
- if (type == ACPI_TYPE_LOCAL_ALIAS) {
- type = obj_desc->common.type;
- op->common.node =
- ACPI_CAST_PTR(struct acpi_namespace_node,
- op->common.node->object);
- }
-
- switch (type) {
- /*
- * For these types, we need the actual node, not the subobject.
- * However, the subobject did not get an extra reference count above.
- *
- * TBD: should ex_resolve_node_to_value be changed to fix this?
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_THERMAL:
-
- acpi_ut_add_reference(op->common.node->object);
- /*lint -fallthrough */
- /*
- * For these types, we need the actual node, not the subobject.
- * The subobject got an extra reference count in ex_resolve_node_to_value.
- */
- case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_EVENT:
- case ACPI_TYPE_REGION:
-
- /* We will create a reference object for these types below */
- break;
+ /* Check if we are resolving a named reference within a package */
- default:
+ if ((op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VARIABLE_PACKAGE_OP)) {
/*
- * All other types - the node was resolved to an actual
- * object, we are done.
+ * We won't resolve package elements here, we will do this
+ * after all ACPI tables are loaded into the namespace. This
+ * behavior supports both forward references to named objects
+ * and external references to objects in other tables.
*/
- goto exit;
+ goto create_new_object;
+ } else {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ op->common.value.string,
+ ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE,
+ NULL,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &(op->common.node)));
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(op->common.value.
+ string, status);
+ return_ACPI_STATUS(status);
+ }
}
}
}
+create_new_object:
+
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
@@ -240,7 +139,27 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
-exit:
+ /*
+ * Handling for unresolved package reference elements.
+ * These are elements that are namepaths.
+ */
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ obj_desc->reference.resolved = TRUE;
+
+ if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ !obj_desc->reference.node) {
+ /*
+ * Name was unresolved above.
+ * Get the prefix node for later lookup
+ */
+ obj_desc->reference.node =
+ walk_state->scope_info->scope.node;
+ obj_desc->reference.aml = op->common.aml;
+ obj_desc->reference.resolved = FALSE;
+ }
+ }
+
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
@@ -351,200 +270,6 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
- * FUNCTION: acpi_ds_build_internal_package_obj
- *
- * PARAMETERS: walk_state - Current walk state
- * op - Parser object to be translated
- * element_count - Number of elements in the package - this is
- * the num_elements argument to Package()
- * obj_desc_ptr - Where the ACPI internal object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a parser Op package object to the equivalent
- * namespace object
- *
- * NOTE: The number of elements in the package will be always be the num_elements
- * count, regardless of the number of elements in the package list. If
- * num_elements is smaller, only that many package list elements are used.
- * if num_elements is larger, the Package object is padded out with
- * objects of type Uninitialized (as per ACPI spec.)
- *
- * Even though the ASL compilers do not allow num_elements to be smaller
- * than the Package list length (for the fixed length package opcode), some
- * BIOS code modifies the AML on the fly to adjust the num_elements, and
- * this code compensates for that. This also provides compatibility with
- * other AML interpreters.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- u32 element_count,
- union acpi_operand_object **obj_desc_ptr)
-{
- union acpi_parse_object *arg;
- union acpi_parse_object *parent;
- union acpi_operand_object *obj_desc = NULL;
- acpi_status status = AE_OK;
- u32 i;
- u16 index;
- u16 reference_count;
-
- ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
-
- /* Find the parent of a possibly nested package */
-
- parent = op->common.parent;
- while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
- parent = parent->common.parent;
- }
-
- /*
- * If we are evaluating a Named package object "Name (xxxx, Package)",
- * the package object already exists, otherwise it must be created.
- */
- obj_desc = *obj_desc_ptr;
- if (!obj_desc) {
- obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
- *obj_desc_ptr = obj_desc;
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.node = parent->common.node;
- }
-
- /*
- * Allocate the element array (array of pointers to the individual
- * objects) based on the num_elements parameter. Add an extra pointer slot
- * so that the list is always null terminated.
- */
- obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
- element_count +
- 1) * sizeof(void *));
-
- if (!obj_desc->package.elements) {
- acpi_ut_delete_object_desc(obj_desc);
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.count = element_count;
-
- /*
- * Initialize the elements of the package, up to the num_elements count.
- * Package is automatically padded with uninitialized (NULL) elements
- * if num_elements is greater than the package list length. Likewise,
- * Package is truncated if num_elements is less than the list length.
- */
- arg = op->common.value.arg;
- arg = arg->common.next;
- for (i = 0; arg && (i < element_count); i++) {
- if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
- if (arg->common.node->type == ACPI_TYPE_METHOD) {
- /*
- * A method reference "looks" to the parser to be a method
- * invocation, so we special case it here
- */
- arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
- status =
- acpi_ds_build_internal_object(walk_state,
- arg,
- &obj_desc->
- package.
- elements[i]);
- } else {
- /* This package element is already built, just get it */
-
- obj_desc->package.elements[i] =
- ACPI_CAST_PTR(union acpi_operand_object,
- arg->common.node);
- }
- } else {
- status =
- acpi_ds_build_internal_object(walk_state, arg,
- &obj_desc->package.
- elements[i]);
- }
-
- if (*obj_desc_ptr) {
-
- /* Existing package, get existing reference count */
-
- reference_count =
- (*obj_desc_ptr)->common.reference_count;
- if (reference_count > 1) {
-
- /* Make new element ref count match original ref count */
-
- for (index = 0; index < (reference_count - 1);
- index++) {
- acpi_ut_add_reference((obj_desc->
- package.
- elements[i]));
- }
- }
- }
-
- arg = arg->common.next;
- }
-
- /* Check for match between num_elements and actual length of package_list */
-
- if (arg) {
- /*
- * num_elements was exhausted, but there are remaining elements in the
- * package_list. Truncate the package to num_elements.
- *
- * Note: technically, this is an error, from ACPI spec: "It is an error
- * for NumElements to be less than the number of elements in the
- * PackageList". However, we just print a message and
- * no exception is returned. This provides Windows compatibility. Some
- * BIOSs will alter the num_elements on the fly, creating this type
- * of ill-formed package object.
- */
- while (arg) {
- /*
- * We must delete any package elements that were created earlier
- * and are not going to be used because of the package truncation.
- */
- if (arg->common.node) {
- acpi_ut_remove_reference(ACPI_CAST_PTR
- (union
- acpi_operand_object,
- arg->common.node));
- arg->common.node = NULL;
- }
-
- /* Find out how many elements there really are */
-
- i++;
- arg = arg->common.next;
- }
-
- ACPI_INFO(("Actual Package length (%u) is larger than "
- "NumElements field (%u), truncated",
- i, element_count));
- } else if (i < element_count) {
- /*
- * Arg list (elements) was exhausted, but we did not reach num_elements count.
- * Note: this is not an error, the package is padded out with NULLs.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (%u) smaller than NumElements "
- "count (%u), padded with null elements\n",
- i, element_count));
- }
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
- op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
@@ -662,11 +387,20 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_PACKAGE:
/*
- * Defer evaluation of Package term_arg operand
+ * Defer evaluation of Package term_arg operand and all
+ * package elements. (01/2017): We defer the element
+ * resolution to allow forward references from the package
+ * in order to provide compatibility with other ACPI
+ * implementations.
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
+
+ if (!op->named.data) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
@@ -818,9 +552,11 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
- obj_desc->reference.object =
- op->common.node->object;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ if (op->common.node) {
+ obj_desc->reference.object =
+ op->common.node->object;
+ }
break;
case AML_DEBUG_OP:
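
The upshot of the dsobject.c changes: a namepath element inside a Package is no longer resolved while the table is being parsed. Instead, the reference object records the prefix scope and the raw AML namepath so that a later pass (dspkginit.c below, driven from nsinit.c further down) can perform the lookup. A sketch of that bookkeeping, using the reference fields added to acobject.h above; the helper name is invented for illustration:

/*
 * Sketch only: what acpi_ds_build_internal_object() now records for
 * an unresolved package element instead of calling acpi_ns_lookup()
 * at parse time. Field names follow the acpi_object_reference
 * changes above.
 */
static void defer_package_ref_sketch(union acpi_operand_object *obj_desc,
				     union acpi_parse_object *op,
				     struct acpi_walk_state *walk_state)
{
	/* Prefix node: where the later relative lookup should start */
	obj_desc->reference.node = walk_state->scope_info->scope.node;

	/* Raw AML namepath, consumed later by acpi_ns_lookup() */
	obj_desc->reference.aml = op->common.aml;

	/* Marks the element for acpi_ds_resolve_package_element() */
	obj_desc->reference.resolved = FALSE;
}
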
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dfc3c25a083d..0336df7ac47d 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -599,6 +599,15 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
*/
walk_state->operand_index = walk_state->num_operands;
+ /* Ignore if child is not valid */
+
+ if (!op->common.value.arg) {
+ ACPI_ERROR((AE_INFO,
+ "Dispatch: Missing child while executing TermArg for %X",
+ op->common.aml_opcode));
+ return_ACPI_STATUS(AE_OK);
+ }
+
status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
new file mode 100644
index 000000000000..6d487edfe2de
--- /dev/null
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -0,0 +1,496 @@
+/******************************************************************************
+ *
+ * Module Name: dspkginit - Completion of deferred package initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("dspkginit")
+
+/* Local prototypes */
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * op - Parser object to be translated
+ * element_count - Number of elements in the package - this is
+ * the num_elements argument to Package()
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ * NOTE: The number of elements in the package will always be the num_elements
+ *	 count, regardless of the number of elements in the package list. If
+ *	 num_elements is smaller, only that many package list elements are used.
+ *	 If num_elements is larger, the Package object is padded out with
+ *	 objects of type Uninitialized (as per ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 element_count,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_parse_object *parent;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_status status = AE_OK;
+ u16 reference_count;
+ u32 index;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+ /* Find the parent of a possibly nested package */
+
+ parent = op->common.parent;
+ while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ parent = parent->common.parent;
+ }
+
+ /*
+ * If we are evaluating a Named package object of the form:
+ * Name (xxxx, Package)
+ * the package object already exists, otherwise it must be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.node = parent->common.node;
+ }
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { /* Just in case */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Allocate the element array (array of pointers to the individual
+ * objects) based on the num_elements parameter. Add an extra pointer slot
+ * so that the list is always null terminated.
+ */
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ element_count +
+ 1) * sizeof(void *));
+
+ if (!obj_desc->package.elements) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.count = element_count;
+ arg = op->common.value.arg;
+ arg = arg->common.next;
+
+ if (arg) {
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ /*
+ * Initialize the elements of the package, up to the num_elements count.
+ * Package is automatically padded with uninitialized (NULL) elements
+ * if num_elements is greater than the package list length. Likewise,
+ * Package is truncated if num_elements is less than the list length.
+ */
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (arg->common.node->type == ACPI_TYPE_METHOD) {
+ /*
+ * A method reference "looks" to the parser to be a method
+ * invocation, so we special case it here
+ */
+ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+ status =
+ acpi_ds_build_internal_object(walk_state,
+ arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ } else {
+ /* This package element is already built, just get it */
+
+ obj_desc->package.elements[i] =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ arg->common.node);
+ }
+ } else {
+ status =
+ acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->package.
+ elements[i]);
+ if (status == AE_NOT_FOUND) {
+ ACPI_ERROR((AE_INFO, "%-48s",
+ "****DS namepath not found"));
+ }
+
+ /*
+ * Initialize this package element. This function handles the
+ * resolution of named references within the package.
+ */
+ acpi_ds_init_package_element(0,
+ obj_desc->package.
+ elements[i], NULL,
+ &obj_desc->package.
+ elements[i]);
+ }
+
+ if (*obj_desc_ptr) {
+
+ /* Existing package, get existing reference count */
+
+ reference_count =
+ (*obj_desc_ptr)->common.reference_count;
+ if (reference_count > 1) {
+
+ /* Make new element ref count match original ref count */
+ /* TBD: Probably need an acpi_ut_add_references function */
+
+ for (index = 0;
+ index < ((u32)reference_count - 1);
+ index++) {
+ acpi_ut_add_reference((obj_desc->
+ package.
+ elements[i]));
+ }
+ }
+ }
+
+ arg = arg->common.next;
+ }
+
+ /* Check for match between num_elements and actual length of package_list */
+
+ if (arg) {
+ /*
+ * num_elements was exhausted, but there are remaining elements in
+ * the package_list. Truncate the package to num_elements.
+ *
+ * Note: technically, this is an error, from ACPI spec: "It is an
+ * error for NumElements to be less than the number of elements in
+ * the PackageList". However, we just print a message and no
+ * exception is returned. This provides compatibility with other
+ * ACPI implementations. Some firmware implementations will alter
+ * the num_elements on the fly, possibly creating this type of
+ * ill-formed package object.
+ */
+ while (arg) {
+ /*
+ * We must delete any package elements that were created earlier
+ * and are not going to be used because of the package truncation.
+ */
+ if (arg->common.node) {
+ acpi_ut_remove_reference(ACPI_CAST_PTR
+ (union
+ acpi_operand_object,
+ arg->common.node));
+ arg->common.node = NULL;
+ }
+
+ /* Find out how many elements there really are */
+
+ i++;
+ arg = arg->common.next;
+ }
+
+ ACPI_INFO(("Actual Package length (%u) is larger than "
+ "NumElements field (%u), truncated",
+ i, element_count));
+ } else if (i < element_count) {
+ /*
+ * Arg list (elements) was exhausted, but we did not reach
+ * num_elements count.
+ *
+ * Note: this is not an error, the package is padded out
+ * with NULLs.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Package List length (%u) smaller than NumElements "
+ "count (%u), padded with null elements\n",
+ i, element_count));
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_package_element
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve a named reference element within a package object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context)
+{
+ union acpi_operand_object **element_ptr;
+
+ if (!source_object) {
+ return (AE_OK);
+ }
+
+ /*
+ * The following code is a bit of a hack to workaround a (current)
+ * limitation of the acpi_pkg_callback interface. We need a pointer
+ * to the location within the element array because a new object
+ * may be created and stored there.
+ */
+ if (context) {
+
+ /* A direct call was made to this function */
+
+ element_ptr = (union acpi_operand_object **)context;
+ } else {
+ /* Call came from acpi_ut_walk_package_tree */
+
+ element_ptr = state->pkg.this_target_obj;
+ }
+
+ /* We are only interested in reference objects/elements */
+
+ if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Attempt to resolve the (named) reference to a namespace node */
+
+ acpi_ds_resolve_package_element(element_ptr);
+ } else if (source_object->common.type == ACPI_TYPE_PACKAGE) {
+ source_object->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_resolve_package_element
+ *
+ * PARAMETERS: element_ptr - Pointer to a reference object
+ *
+ * RETURN: Possible new element is stored to the indirect element_ptr
+ *
+ * DESCRIPTION: Resolve a package element that is a reference to a named
+ * object.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
+{
+ acpi_status status;
+ union acpi_generic_state scope_info;
+ union acpi_operand_object *element = *element_ptr;
+ struct acpi_namespace_node *resolved_node;
+ char *external_path = NULL;
+ acpi_object_type type;
+
+ ACPI_FUNCTION_TRACE(ds_resolve_package_element);
+
+ /* Check if reference element is already resolved */
+
+ if (element->reference.resolved) {
+ return_VOID;
+ }
+
+ /* Element must be a reference object of correct type */
+
+ scope_info.scope.node = element->reference.node; /* Prefix node */
+
+ status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml, /* Pointer to AML path */
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+ NULL, &resolved_node);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ (char *)element->reference.
+ aml, NULL, &external_path);
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not find/resolve named package element: %s",
+ external_path));
+
+ ACPI_FREE(external_path);
+ *element_ptr = NULL;
+ return_VOID;
+ } else if (resolved_node->type == ACPI_TYPE_ANY) {
+
+ /* Named reference not resolved, return a NULL package element */
+
+ ACPI_ERROR((AE_INFO,
+ "Could not resolve named package element [%4.4s] in [%4.4s]",
+ resolved_node->name.ascii,
+ scope_info.scope.node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#if 0
+ else if (resolved_node->flags & ANOBJ_TEMPORARY) {
+ /*
+ * A temporary node found here indicates that the reference is
+ * to a node that was created within this method. We are not
+ * going to allow it (especially if the package is returned
+ * from the method) -- the temporary node will be deleted out
+ * from under the method. (05/2017).
+ */
+ ACPI_ERROR((AE_INFO,
+ "Package element refers to a temporary name [%4.4s], "
+ "inserting a NULL element",
+ resolved_node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#endif
+
+ /*
+ * Special handling for Alias objects. We need resolved_node to point
+ * to the Alias target. This effectively "resolves" the alias.
+ */
+ if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+ resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ resolved_node->object);
+ }
+
+ /* Update the reference object */
+
+ element->reference.resolved = TRUE;
+ element->reference.node = resolved_node;
+ type = element->reference.node->type;
+
+ /*
+ * Attempt to resolve the node to a value before we insert it into
+ * the package. If this is a reference to a common data type,
+ * resolve it immediately. According to the ACPI spec, package
+ * elements can only be "data objects" or method references.
+ * Attempt to resolve to an Integer, Buffer, String or Package.
+ * If cannot, return the named reference (for things like Devices,
+ * Methods, etc.) Buffer Fields and Fields will resolve to simple
+ * objects (int/buf/str/pkg).
+ *
+ * NOTE: References to things like Devices, Methods, Mutexes, etc.
+ * will remain as named references. This behavior is not described
+ * in the ACPI spec, but it appears to be an oversight.
+ */
+ status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+#if 0
+/* TBD - alias support */
+ /*
+ * Special handling for Alias objects. We need to setup the type
+ * and the Op->Common.Node to point to the Alias target. Note,
+ * Alias has at most one level of indirection internally.
+ */
+ type = op->common.node->type;
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = obj_desc->common.type;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ op->common.node->object);
+ }
+#endif
+
+ switch (type) {
+ /*
+ * These object types are a result of named references, so we will
+ * leave them as reference objects. In other words, these types
+ * have no intrinsic "value".
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+
+		/* TBD: This may not be necessary */
+
+ acpi_ut_add_reference(resolved_node->object);
+ break;
+
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_METHOD:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ break;
+
+ default:
+ /*
+ * For all other types - the node was resolved to an actual
+ * operand object with a value, return the object
+ */
+ *element_ptr = (union acpi_operand_object *)resolved_node;
+ break;
+ }
+
+ return_VOID;
+}
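
To make the NumElements policy above concrete: the element array is always NumElements entries long (plus one NULL terminator slot), padded with NULL entries when the initializer list is short and truncated with an ACPI_INFO message when it is long. A minimal user-space sketch of that policy, with invented names; this is not ACPICA code:

#include <stdlib.h>
#include <string.h>

/*
 * Sketch of the padding/truncation policy: build a num_elements-sized,
 * NULL-terminated array from "actual" initializers.
 */
static void **build_elements_sketch(unsigned int num_elements,
				    void **list, unsigned int actual)
{
	/* Extra slot keeps the array NULL terminated, as above */
	void **elem = calloc((size_t)num_elements + 1, sizeof(*elem));
	unsigned int n = actual < num_elements ? actual : num_elements;

	if (!elem)
		return NULL;

	/* Copy what fits; truncation drops the excess initializers */
	memcpy(elem, list, n * sizeof(*elem));

	/* Entries n..num_elements-1 remain NULL (Uninitialized pad) */
	return elem;
}
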
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c941947a063..3a3cb8624f41 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *ignored)
{
acpi_status status;
+ acpi_event_status event_status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_block->block_base_number + gpe_index;
/*
* Ignore GPEs that have no corresponding _Lxx/_Exx method
- * and GPEs that are used to wake the system
+ * and GPEs that are used for wakeup
*/
- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_NONE)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_HANDLER)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_RAW_HANDLER)
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_METHOD)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
+ event_status = 0;
+ (void)acpi_hw_get_gpe_status(gpe_event_info,
+ &event_status);
+
status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index +
- gpe_block->block_base_number));
+ gpe_number));
continue;
}
+ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
+
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+ (void)acpi_ev_gpe_dispatch(gpe_block->node,
+ gpe_event_info,
+ gpe_number);
+ }
+
gpe_enabled_count++;
}
}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 57718a3e029a..67c7c4ce276c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
*/
gpe_event_info->flags =
(ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
+ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
+ /*
+ * A reference to this GPE has been added during the GPE block
+ * initialization, so drop it now to prevent the GPE from being
+ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
+ */
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
+ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
}
/*
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index d43d7da4c734..b8adb11f1b07 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -87,68 +87,40 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
target_node->object);
}
- /*
- * For objects that can never change (i.e., the NS node will
- * permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
- * Integers, buffers, etc.), we have to point the Alias node
- * to the original Node.
- */
- switch (target_node->type) {
+ /* Ensure that the target node is valid */
- /* For these types, the sub-object can change dynamically via a Store */
+ if (!target_node) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
- case ACPI_TYPE_PACKAGE:
- case ACPI_TYPE_BUFFER_FIELD:
- /*
- * These types open a new scope, so we need the NS node in order to access
- * any children.
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
- case ACPI_TYPE_LOCAL_SCOPE:
- /*
- * The new alias has the type ALIAS and points to the original
- * NS node, not the object itself.
- */
- alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
- break;
+ /* Construct the alias object (a namespace node) */
+ switch (target_node->type) {
case ACPI_TYPE_METHOD:
/*
- * Control method aliases need to be differentiated
+ * Control method aliases need to be differentiated with
+ * a special type
*/
alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
default:
-
- /* Attach the original source object to the new Alias Node */
-
/*
- * The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
- * additional reference to prevent deletion out from under either the
- * target node or the alias Node
+ * All other object types.
+ *
+ * The new alias has the type ALIAS and points to the original
+ * NS node, not the object itself.
*/
- status = acpi_ns_attach_object(alias_node,
- acpi_ns_get_attached_object
- (target_node),
- target_node->type);
+ alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
}
/* Since both operands are Nodes, we don't need to delete them */
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 44092f744477..83398dc4b7c2 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -102,7 +102,7 @@ static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
- {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
{ACPI_EXD_PACKAGE, 0, NULL}
};
@@ -384,6 +384,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
count = info->offset;
while (count) {
+ if (!obj_desc) {
+ return;
+ }
+
target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
name = info->name;
@@ -469,9 +473,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
start = *ACPI_CAST_PTR(void *, target);
next = start;
- acpi_os_printf("%20s : %p", name, next);
+ acpi_os_printf("%20s : %p ", name, next);
if (next) {
- acpi_os_printf("(%s %2.2X)",
+ acpi_os_printf("%s (Type %2.2X)",
acpi_ut_get_object_type_name
(next), next->common.type);
@@ -493,6 +497,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
break;
}
}
+ } else {
+ acpi_os_printf("- No attached objects");
}
acpi_os_printf("\n");
@@ -1129,7 +1135,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
default:
- acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type);
+ acpi_os_printf("[%s] Type: %2.2X\n",
+ acpi_ut_get_type_name(obj_desc->common.type),
+ obj_desc->common.type);
break;
}
}
@@ -1167,11 +1175,17 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
obj_desc, flags);
- acpi_os_printf("\nAttached Object (%p):\n",
- ((struct acpi_namespace_node *)obj_desc)->
- object);
-
obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ if (!obj_desc) {
+ return_VOID;
+ }
+
+ acpi_os_printf("\nAttached Object %p", obj_desc);
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" - Namespace Node");
+ }
+
+ acpi_os_printf(":\n");
goto dump_object;
}
@@ -1191,6 +1205,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
dump_object:
+ if (!obj_desc) {
+ return_VOID;
+ }
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index f222a80ca38e..1e7649ce0a7b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -265,6 +265,8 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid numeric logical opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -345,6 +347,9 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type for logical operator: %X",
+ operand0->common.type));
status = AE_AML_INTERNAL;
break;
}
@@ -388,6 +393,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -456,6 +463,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index eecb3bff7fd7..57980b7d3594 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -414,6 +414,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type: %X",
+ (operand[0])->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index de74a4c25085..acb417b58bbb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -107,7 +107,7 @@ acpi_hw_get_access_bit_width(u64 address,
ACPI_IS_ALIGNED(reg->bit_width, 8)) {
access_bit_width = reg->bit_width;
} else if (reg->access_width) {
- access_bit_width = (1 << (reg->access_width + 2));
+ access_bit_width = ACPI_ACCESS_BIT_WIDTH(reg->access_width);
} else {
access_bit_width =
ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset +
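
For reference, the GAS access_width encoding behind both forms is 1 = byte, 2 = word, 3 = dword, 4 = qword, so the bit width is 2^(access_width + 2). A sketch assuming ACPI_ACCESS_BIT_WIDTH() expands to the same expression it replaces:

/* Assumed to match the open-coded expression replaced above */
#define ACCESS_BIT_WIDTH_SKETCH(aw)	(1u << ((aw) + 2))

/*
 * access_width 1 -> 8 bits, 2 -> 16, 3 -> 32, 4 -> 64:
 *
 *   ACCESS_BIT_WIDTH_SKETCH(1) == 8
 *   ACCESS_BIT_WIDTH_SKETCH(4) == 64
 */
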
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 7ef13934968f..e5c095ca6083 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -72,13 +72,16 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_sleep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake_prep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) }
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake)}
};
/*
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index e5f4fa496572..f2733f51ca8d 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -292,6 +292,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
{
acpi_status status;
char *path = pathname;
+ char *external_path;
struct acpi_namespace_node *prefix_node;
struct acpi_namespace_node *current_node = NULL;
struct acpi_namespace_node *this_node = NULL;
@@ -427,13 +428,22 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = this_node->parent;
if (!this_node) {
+ /*
+ * Current scope has no parent scope. Externalize
+ * the internal path for error message.
+ */
+ status =
+ acpi_ns_externalize_name
+ (ACPI_UINT32_MAX, pathname, NULL,
+ &external_path);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Path has too many parent prefixes (^)",
+ external_path));
+
+ ACPI_FREE(external_path);
+ }
- /* Current scope has no parent scope */
-
- ACPI_ERROR((AE_INFO,
- "%s: Path has too many parent prefixes (^) "
- "- reached beyond root node",
- pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
@@ -634,6 +644,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag &&
+ (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags |= IMPLICIT_EXTERNAL;
+ }
+#endif
}
/* Special handling for the last segment (num_segments == 0) */
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 9095d51f6b37..67b7370dcae5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -69,9 +69,14 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
u8 user_arg_type;
u32 i;
- /* If not a predefined name, cannot typecheck args */
-
- if (!info->predefined) {
+ /*
+ * If not a predefined name, cannot typecheck args, because
+ * we have no idea what argument types are expected.
+	 * Also, ignore typecheck warnings/errors if this method
+ * has already been evaluated at least once -- in order
+ * to suppress repetitive messages.
+ */
+ if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -93,6 +98,10 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
acpi_ut_get_type_name
(user_arg_type),
acpi_ut_get_type_name(arg_type)));
+
+ /* Prevent any additional typechecking for this method */
+
+ info->node->flags |= ANOBJ_EVALUATED;
}
}
}
@@ -121,7 +130,7 @@ acpi_ns_check_acpi_compliance(char *pathname,
u32 aml_param_count;
u32 required_param_count;
- if (!predefined) {
+ if (!predefined || (node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -215,6 +224,10 @@ acpi_ns_check_argument_count(char *pathname,
u32 aml_param_count;
u32 required_param_count;
+ if (node->flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
if (!predefined) {
/*
* Not a predefined name. Check the incoming user argument count
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index ce33e7297ea7..9c6297949712 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -396,6 +396,20 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
info->package_init++;
status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /*
+ * Resolve all named references in package objects (and all
+ * sub-packages). This action has been deferred until the entire
+ * namespace has been loaded, in order to support external and
+ * forward references from individual package elements (05/2017).
+ */
+ status = acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element,
+ NULL);
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
break;
default:
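
acpi_ut_walk_package_tree() visits every element, including nested sub-packages, and invokes its callback with the acpi_pkg_callback shape that acpi_ds_init_package_element() (declared in acdispat.h above) implements. A sketch of a callback with the same shape, assuming the ACPICA headers; the stub is illustrative only, the real resolution work is done by acpi_ds_init_package_element():

/*
 * Sketch only: a callback matching the shape used by
 * acpi_ut_walk_package_tree() above. This stub merely counts the
 * non-NULL elements it is handed.
 */
static acpi_status count_elements_sketch(u8 object_type,
					 union acpi_operand_object *source,
					 union acpi_generic_state *state,
					 void *context)
{
	u32 *count = context;

	if (source)
		(*count)++;

	return (AE_OK);
}

/* Usage: acpi_ut_walk_package_tree(obj_desc, NULL, count_elements_sketch, &count); */
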
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index aa16aeaa8937..a410760a0308 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -89,7 +89,14 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
{
acpi_size size;
- ACPI_FUNCTION_ENTRY();
+ /* Validate the Node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/cached reference target node: %p, descriptor type %d",
+ node, ACPI_GET_DESCRIPTOR_TYPE(node)));
+ return (0);
+ }
size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
return (size);
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 4954cb6c9090..a8ea8fb1d299 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -614,6 +614,8 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
default: /* Should not get here, type was validated by caller */
+ ACPI_ERROR((AE_INFO, "Invalid Package type: %X",
+ package->ret_info.type));
return (AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
- status = acpi_get_handle(handle, pathname, &target_handle);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (pathname) {
+ status = acpi_get_handle(handle, pathname, &target_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
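With this hunk, acpi_evaluate_object_typed() accepts a NULL pathname and evaluates the object behind the handle directly. A hedged usage sketch (the helper name and the integer-object handling are illustrative):

    #include <acpi/acpi.h>

    static acpi_status read_integer(acpi_handle handle, u64 *value)
    {
            struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
            union acpi_object *obj;
            acpi_status status;

            /* NULL pathname: evaluate the handle itself */
            status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
                                                ACPI_TYPE_INTEGER);
            if (ACPI_FAILURE(status))
                    return status;

            obj = buf.pointer;
            *value = obj->integer.value;
            ACPI_FREE(buf.pointer);
            return AE_OK;
    }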
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b4224005783c..bb04dec168ad 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -164,6 +164,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Final argument count: %u pass %u\n",
+ walk_state->arg_count,
+ walk_state->pass_number));
+
/*
* Handle executable code at "module-level". This refers to
* executable opcodes that appear outside of any control method.
@@ -277,6 +282,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
AML_NAME_OP)
&& (walk_state->pass_number <=
ACPI_IMODE_LOAD_PASS2)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Setup Package/Buffer: Pass %u, AML Ptr: %p\n",
+ walk_state->pass_number,
+ aml_op_start));
+
/*
* Skip parsing of Buffers and Packages because we don't have
* enough info in the first pass to parse them correctly.
@@ -570,6 +580,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Check for arguments that need to be processed */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Parseloop: argument count: %u\n",
+ walk_state->arg_count));
+
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ef6384e374fc..0bef6df71bba 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -359,6 +359,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
acpi_ps_build_named_op(walk_state, aml_op_start, op,
&named_op);
acpi_ps_free_op(op);
+
+#ifdef ACPI_ASL_COMPILER
+ if (acpi_gbl_disasm_flag
+ && walk_state->opcode == AML_EXTERNAL_OP
+ && status == AE_NOT_FOUND) {
+ /*
+ * If parsing of AML_EXTERNAL_OP's name path fails, then skip
+ * past this opcode and keep parsing. This is a much better
+ * alternative than to abort the entire disassembler. At this
+ * point, the parser_state is at the end of the namepath of the
+ * external declaration opcode. Setting walk_state->Aml to
+ * walk_state->parser_state.Aml + 2 moves the
+ * walk_state->Aml past the object type and the paramcount of the
+ * external opcode. For the error message, only print the AML
+ * offset. We could attempt to print the name but this may cause
+ * a segmentation fault when printing the namepath because the
+ * AML may be incorrect.
+ */
+ acpi_os_printf
+ ("// Invalid external declaration at AML offset 0x%x.\n",
+ walk_state->aml -
+ walk_state->parser_state.aml_start);
+ walk_state->aml = walk_state->parser_state.aml + 2;
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+#endif
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
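For reference, the two-byte skip above works because of the byte layout of an External declaration: after the failed namepath parse, parser_state.aml sits just past the namepath, so advancing by two bytes steps over the object-type and argument-count bytes. A layout note with assumed example values:

    /*
     * External declaration in the AML stream (example values):
     *
     *   0x15              ExternalOp
     *   "\_SB.DEV0"       NamePath      <- parse failed in here
     *   0x08              ObjectType    <- parser_state.aml points here
     *   0x00              ArgumentCount
     *   ...               next opcode   <- parser_state.aml + 2 resumes here
     */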
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 59a4f9ed06a7..be65e65e216e 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -615,7 +615,7 @@ ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
* device we are querying
* name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * METHOD_NAME__AEI or METHOD_NAME__DMA)
* user_function - Called for each resource
* context - Passed to user_function
*
@@ -641,11 +641,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
!ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
+ /* Get the _CRS/_PRS/_AEI/_DMA resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
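Since _DMA is now an accepted method name, callers can walk a device's _DMA resources the same way as _CRS. A hedged usage sketch (the callback merely counts descriptors):

    #include <acpi/acpi.h>

    static acpi_status dma_res_cb(struct acpi_resource *res, void *ctx)
    {
            u32 *count = ctx;

            (*count)++;             /* just count the descriptors */
            return AE_OK;
    }

    static void walk_dma(acpi_handle device)
    {
            u32 count = 0;

            /* METHOD_NAME__DMA is "_DMA", newly allowed by this patch */
            (void)acpi_walk_resources(device, METHOD_NAME__DMA,
                                      dma_res_cb, &count);
    }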
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index c9d6fa6d7cc6..b19a2f0ea331 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -50,6 +50,57 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbdata")
+/* Local prototypes */
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index);
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_compare_tables
+ *
+ * PARAMETERS: table_desc - Table 1 descriptor to be compared
+ * table_index - Index of table 2 to be compared
+ *
+ * RETURN: TRUE if both tables are identical.
+ *
+ * DESCRIPTION: This function compares a table with another table that has
+ * already been installed in the root table list.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
+{
+ acpi_status status = AE_OK;
+ u8 is_identical;
+ struct acpi_table_header *table;
+ u32 table_length;
+ u8 table_flags;
+
+ status =
+ acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
+ &table, &table_length, &table_flags);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ is_identical = (u8)((table_desc->length != table_length ||
+ memcmp(table_desc->pointer, table, table_length)) ?
+ FALSE : TRUE);
+
+ /* Release the acquired table */
+
+ acpi_tb_release_table(table, table_length, table_flags);
+ return (is_identical);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_init_table_descriptor
@@ -64,6 +115,7 @@ ACPI_MODULE_NAME("tbdata")
* DESCRIPTION: Initialize a new table descriptor
*
******************************************************************************/
+
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
acpi_physical_address address,
@@ -338,7 +390,7 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
{
- if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) {
+ if (!table_desc->pointer && !acpi_gbl_enable_table_validation) {
/*
* Only validates the header of the table.
* Note that Length contains the size of the mapping after invoking
@@ -354,22 +406,100 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
return (acpi_tb_validate_table(table_desc));
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_check_duplication
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Avoid installing duplicated tables. However, table override
+ * and user-aided dynamic table loads are allowed, so comparing
+ * the table address alone is not sufficient; the entire table
+ * content must be checked.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_check_duplication);
+
+ /* Check if table is already registered */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+
+ /* Do not compare with unverified tables */
+
+ if (!(acpi_gbl_root_table_list.tables[i].flags &
+ ACPI_TABLE_IS_VERIFIED)) {
+ continue;
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (!acpi_tb_compare_tables(table_desc, i)) {
+ continue;
+ }
+
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+ if (acpi_gbl_root_table_list.tables[i].flags &
+ ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ } else {
+ *table_index = i;
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+ }
+
+ /* Indicate no duplication to the caller */
+
+ return_ACPI_STATUS(AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_tb_verify_temp_table
*
* PARAMETERS: table_desc - Table descriptor
* signature - Table signature to verify
+ * table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to validate and verify the table, the
* returned table descriptor is in "VALIDATED" state.
+ * Note that 'table_index' must be non-NULL in order to
+ * enable the duplication check.
*
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index)
{
acpi_status status = AE_OK;
@@ -392,9 +522,10 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
- /* Verify the checksum */
+ if (acpi_gbl_enable_table_validation) {
+
+ /* Verify the checksum */
- if (acpi_gbl_verify_table_checksum) {
status =
acpi_tb_verify_checksum(table_desc->pointer,
table_desc->length);
@@ -411,9 +542,34 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
+
+ /* Avoid duplications */
+
+ if (table_index) {
+ status =
+ acpi_tb_check_duplication(table_desc, table_index);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ "%4.4s 0x%8.8X%8.8X Table is duplicated",
+ acpi_ut_valid_nameseg(table_desc->signature.ascii) ?
+ table_desc->signature.ascii : "????",
+ ACPI_FORMAT_UINT64(table_desc->address)));
+ }
+
+ goto invalidate_and_exit;
+ }
+ }
+
+ table_desc->flags |= ACPI_TABLE_IS_VERIFIED;
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
invalidate_and_exit:
acpi_tb_invalidate_table(table_desc);
@@ -436,6 +592,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
u32 table_count;
+ u32 current_table_count, max_table_count;
+ u32 i;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -455,8 +613,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ max_table_count = table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)max_table_count) *
sizeof(struct acpi_table_desc));
if (!tables) {
ACPI_ERROR((AE_INFO,
@@ -466,9 +624,16 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Copy and free the previous table array */
+ current_table_count = 0;
if (acpi_gbl_root_table_list.tables) {
- memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size)table_count * sizeof(struct acpi_table_desc));
+ for (i = 0; i < table_count; i++) {
+ if (acpi_gbl_root_table_list.tables[i].address) {
+ memcpy(tables + current_table_count,
+ acpi_gbl_root_table_list.tables + i,
+ sizeof(struct acpi_table_desc));
+ current_table_count++;
+ }
+ }
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -476,8 +641,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.max_table_count = max_table_count;
+ acpi_gbl_root_table_list.current_table_count = current_table_count;
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
@@ -818,13 +983,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
acpi_ev_update_gpes(owner_id);
}
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
- }
+ /* Invoke table handler */
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table);
return_ACPI_STATUS(status);
}
@@ -894,15 +1055,11 @@ acpi_status acpi_tb_unload_table(u32 table_index)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Invoke table handler if present */
+ /* Invoke table handler */
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table);
}
/* Delete the portion of the namespace owned by this table */
@@ -918,3 +1075,26 @@ acpi_status acpi_tb_unload_table(u32 table_index)
}
ACPI_EXPORT_SYMBOL(acpi_tb_unload_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_notify_table
+ *
+ * PARAMETERS: event - Table event
+ * table - Validated table pointer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Notify the registered table handler of a table event.
+ *
+ ******************************************************************************/
+
+void acpi_tb_notify_table(u32 event, void *table)
+{
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(event, table,
+ acpi_gbl_table_handler_context);
+ }
+}
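Two things fall out of this file: acpi_tb_notify_table() centralizes the "call the handler if present" pattern, and the duplication check reduces to a full-length compare rather than a header compare. The comparison logic, as a standalone sketch with plain C types in place of the ACPICA descriptors:

    #include <string.h>

    /* Identical only if both length and every byte match; comparing
     * just the 36-byte ACPI table header is not enough to detect
     * duplicates.
     */
    static int tables_identical(const void *a, unsigned int a_len,
                                const void *b, unsigned int b_len)
    {
            return a_len == b_len && memcmp(a, b, a_len) == 0;
    }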
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4620f3c68c13..0dfc0ac3c141 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -48,54 +48,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbinstal")
-/* Local prototypes */
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_compare_tables
- *
- * PARAMETERS: table_desc - Table 1 descriptor to be compared
- * table_index - Index of table 2 to be compared
- *
- * RETURN: TRUE if both tables are identical.
- *
- * DESCRIPTION: This function compares a table with another table that has
- * already been installed in the root table list.
- *
- ******************************************************************************/
-
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
-{
- acpi_status status = AE_OK;
- u8 is_identical;
- struct acpi_table_header *table;
- u32 table_length;
- u8 table_flags;
-
- status =
- acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
- &table, &table_length, &table_flags);
- if (ACPI_FAILURE(status)) {
- return (FALSE);
- }
-
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- is_identical = (u8)((table_desc->length != table_length ||
- memcmp(table_desc->pointer, table, table_length)) ?
- FALSE : TRUE);
-
- /* Release the acquired table */
-
- acpi_tb_release_table(table, table_length, table_flags);
- return (is_identical);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table_with_override
@@ -112,7 +64,6 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
* table array.
*
******************************************************************************/
-
void
acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
u8 override, u32 *table_index)
@@ -210,95 +161,29 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
- /* Validate and verify a table before installation */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
- if (ACPI_FAILURE(status)) {
- goto release_and_exit;
- }
-
/* Acquire the table lock */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (reload) {
- /*
- * Validate the incoming table signature.
- *
- * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
- * 2) We added support for OEMx tables, signature "OEM".
- * 3) Valid tables were encountered with a null signature, so we just
- * gave up on validating the signature, (05/2008).
- * 4) We encountered non-AML tables such as the MADT, which caused
- * interpreter errors and kernel faults. So now, we once again allow
- * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
- */
- if ((new_table_desc.signature.ascii[0] != 0x00) &&
- (!ACPI_COMPARE_NAME
- (&new_table_desc.signature, ACPI_SIG_SSDT))
- && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
- ACPI_BIOS_ERROR((AE_INFO,
- "Table has invalid signature [%4.4s] (0x%8.8X), "
- "must be SSDT or OEMx",
- acpi_ut_valid_nameseg(new_table_desc.
- signature.
- ascii) ?
- new_table_desc.signature.
- ascii : "????",
- new_table_desc.signature.integer));
-
- status = AE_BAD_SIGNATURE;
- goto unlock_and_exit;
- }
-
- /* Check if table is already registered */
-
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
- ++i) {
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- if (!acpi_tb_compare_tables(&new_table_desc, i)) {
- continue;
- }
+ /* Validate and verify a table before installation */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, &i);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
/*
- * Note: the current mechanism does not unregister a table if it is
- * dynamically unloaded. The related namespace entries are deleted,
- * but the table remains in the root table list.
- *
- * The assumption here is that the number of different tables that
- * will be loaded is actually small, and there is minimal overhead
- * in just keeping the table in case it is needed again.
- *
- * If this assumption changes in the future (perhaps on large
- * machines with many table load/unload operations), tables will
- * need to be unregistered when they are unloaded, and slots in the
- * root table list should be reused when empty.
+ * Table was unloaded, allow it to be reloaded.
+ * As we are going to return AE_OK to the caller, we should
+ * take the responsibility of freeing the input descriptor.
+ * Refill the input descriptor to ensure
+ * acpi_tb_install_table_with_override() can be called again to
+ * indicate the re-installation.
*/
- if (acpi_gbl_root_table_list.tables[i].flags &
- ACPI_TABLE_IS_LOADED) {
-
- /* Table is still loaded, this is an error */
-
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- } else {
- /*
- * Table was unloaded, allow it to be reloaded.
- * As we are going to return AE_OK to the caller, we should
- * take the responsibility of freeing the input descriptor.
- * Refill the input descriptor to ensure
- * acpi_tb_install_table_with_override() can be called again to
- * indicate the re-installation.
- */
- acpi_tb_uninstall_table(&new_table_desc);
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- *table_index = i;
- return_ACPI_STATUS(AE_OK);
- }
+ acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ *table_index = i;
+ return_ACPI_STATUS(AE_OK);
}
+ goto unlock_and_exit;
}
/* Add the table to the global root table list */
@@ -306,14 +191,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
acpi_tb_install_table_with_override(&new_table_desc, override,
table_index);
- /* Invoke table handler if present */
+ /* Invoke table handler */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
- new_table_desc.pointer,
- acpi_gbl_table_handler_context);
- }
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_INSTALL, new_table_desc.pointer);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
unlock_and_exit:
@@ -382,9 +263,11 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
finish_override:
- /* Validate and verify a table before overriding */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
+ /*
+ * Validate and verify a table before overriding. The nested table
+ * duplication check is skipped here, as it is too complicated and
+ * unnecessary for overrides.
+ */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, NULL);
if (ACPI_FAILURE(status)) {
return;
}
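After this refactor the install path reads the verifier's status instead of open-coding the scan. The three possible outcomes of the duplication check, summarized (statuses as used in the hunks above):

    /*
     * acpi_tb_verify_temp_table() duplication-check outcomes:
     *
     *   AE_OK             - no identical table registered; install it
     *   AE_CTRL_TERMINATE - identical table found but unloaded; the
     *                       caller reuses its slot and returns success
     *   AE_ALREADY_EXISTS - identical table found and still loaded;
     *                       the install fails
     */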
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 010b1c43df92..26ad596c973e 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,7 +167,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
- u32 i;
+ struct acpi_table_desc *table_desc;
+ u32 i, j;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -179,6 +180,8 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/*
* Ensure OS early boot logic, which is required by some hosts. If the
* table state is reported to be wrong, developers should fix the
@@ -186,17 +189,39 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
* early stage.
*/
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (acpi_gbl_root_table_list.tables[i].pointer) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (table_desc->pointer) {
ACPI_ERROR((AE_INFO,
"Table [%4.4s] is not invalidated during early boot stage",
- acpi_gbl_root_table_list.tables[i].
- signature.ascii));
+ table_desc->signature.ascii));
}
}
- acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
+ if (!acpi_gbl_enable_table_validation) {
+ /*
+ * Now it's safe to do full table validation. We can do deferred
+ * table initialization here once the flag is set.
+ */
+ acpi_gbl_enable_table_validation = TRUE;
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
+ ++i) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (!(table_desc->flags & ACPI_TABLE_IS_VERIFIED)) {
+ status =
+ acpi_tb_verify_temp_table(table_desc, NULL,
+ &j);
+ if (ACPI_FAILURE(status)) {
+ acpi_tb_uninstall_table(table_desc);
+ }
+ }
+ }
+ }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -369,6 +394,10 @@ void acpi_put_table(struct acpi_table_header *table)
ACPI_FUNCTION_TRACE(acpi_put_table);
+ if (!table) {
+ return_VOID;
+ }
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Walk the root table list */
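The NULL check makes acpi_put_table() safe to call unconditionally on cleanup paths, much like kfree(). A hedged usage sketch:

    #include <acpi/acpi.h>

    static void peek_madt(void)
    {
            struct acpi_table_header *madt = NULL;

            if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &madt)))
                    return;

            /* ... inspect madt ... */

            acpi_put_table(madt);   /* now a no-op when madt is NULL */
    }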
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index b71ce3b817ea..d81f442228b8 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -206,7 +206,7 @@ acpi_status acpi_tb_load_namespace(void)
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
table = &acpi_gbl_root_table_list.tables[i];
- if (!acpi_gbl_root_table_list.tables[i].address ||
+ if (!table->address ||
(!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
&& !ACPI_COMPARE_NAME(table->signature.ascii,
ACPI_SIG_PSDT)
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 6600bc257516..fb406daf47fa 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -69,8 +69,10 @@ static const char acpi_gbl_hex_to_ascii[] = {
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
+ u64 index;
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+ acpi_ut_short_shift_right(integer, position, &index);
+ return (acpi_gbl_hex_to_ascii[index & 0xF]);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index aa0502d1d019..5f9c680076c4 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -47,15 +47,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")
-/*
- * Optional support for 64-bit double-precision integer divide. This code
- * is configurable and is implemented in order to support 32-bit kernel
- * environments where a 64-bit double-precision math library is not available.
- *
- * Support for a more normal 64-bit divide/modulo (with check for a divide-
- * by-zero) appears after this optional section of code.
- */
-#ifndef ACPI_USE_NATIVE_DIVIDE
/* Structures used only for 64-bit divide */
typedef struct uint64_struct {
u32 lo;
@@ -69,6 +60,217 @@ typedef union uint64_overlay {
} uint64_overlay;
+/*
+ * Optional support for 64-bit double-precision integer multiply and shift.
+ * This code is configurable and is implemented in order to support 32-bit
+ * kernel environments where a 64-bit double-precision math library is not
+ * available.
+ */
+#ifndef ACPI_USE_NATIVE_MATH64
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: multiplicand - 64-bit multiplicand
+ * multiplier - 32-bit multiplier
+ * out_product - Pointer to where the product is returned
+ *
+ * DESCRIPTION: Perform a short multiply.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+ union uint64_overlay multiplicand_ovl;
+ union uint64_overlay product;
+ u32 carry32;
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ multiplicand_ovl.full = multiplicand;
+
+ /*
+ * The Product is 64 bits, the carry is always 32 bits,
+ * and is generated by the second multiply.
+ */
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
+ product.part.hi, carry32);
+
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
+ product.part.lo, carry32);
+
+ product.part.hi += carry32;
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = product.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short left shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.hi = operand_ovl.part.lo;
+ operand_ovl.part.lo ^= operand_ovl.part.lo;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short right shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.lo = operand_ovl.part.hi;
+ operand_ovl.part.hi ^= operand_ovl.part.hi;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#else
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_multiply function.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = multiplicand * multiplier;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_left function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand << count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_right function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand >> count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#endif
+
+/*
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -258,6 +460,7 @@ acpi_ut_divide(u64 in_dividend,
}
#else
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide, acpi_ut_divide
@@ -272,6 +475,7 @@ acpi_ut_divide(u64 in_dividend,
* perform the divide.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
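The new multiply/shift helpers give 32-bit kernels without a 64-bit math library the same interface as native builds: emulated with 32-bit halves under #ifndef ACPI_USE_NATIVE_MATH64, plain operators otherwise. Callers stay identical either way; a hedged sketch (these are ACPICA-internal utilities, not exported kernel API):

    static void math64_demo(void)
    {
            u64 shifted, product;

            /* Same calls on emulated (32-bit) and native builds */
            acpi_ut_short_shift_left(0x12345678ULL, 4, &shifted);
            acpi_ut_short_multiply(0x12345678ULL, 10, &product);
    }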
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 443ffad01209..45c78c2adbf0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -224,7 +224,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
*
* RETURN: Status
*
- * DESCRIPTION: Walk through a package
+ * DESCRIPTION: Walk through a package, including subpackages
*
******************************************************************************/
@@ -236,8 +236,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_generic_state *state;
- u32 this_index;
union acpi_operand_object *this_source_obj;
+ u32 this_index;
ACPI_FUNCTION_TRACE(ut_walk_package_tree);
@@ -251,8 +251,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* Get one element of the package */
this_index = state->pkg.index;
- this_source_obj = (union acpi_operand_object *)
+ this_source_obj =
state->pkg.source_object->package.elements[this_index];
+ state->pkg.this_target_obj =
+ &state->pkg.source_object->package.elements[this_index];
/*
* Check for:
@@ -339,6 +341,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* We should never get here */
+ ACPI_ERROR((AE_INFO, "State list did not terminate correctly"));
+
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 64e6641bfe82..cb3db9fed50d 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -483,6 +483,11 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/* A namespace node should never get here */
+ ACPI_ERROR((AE_INFO,
+ "Received a namespace node [%4.4s] "
+ "where an operand object is required",
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ internal_object)->name.ascii));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 7e6e1ae6140f..c008589b41bd 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -176,7 +176,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
u64 number = 0;
while (isdigit((int)*string)) {
- number *= 10;
+ acpi_ut_short_multiply(number, 10, &number);
number += *(string++) - '0';
}
@@ -286,7 +286,7 @@ static char *acpi_ut_format_number(char *string,
/* Generate full string in reverse order */
pos = acpi_ut_put_number(reversed_string, number, base, upper);
- i = ACPI_PTR_DIFF(pos, reversed_string);
+ i = (s32)ACPI_PTR_DIFF(pos, reversed_string);
/* Printing 100 using %2d gives "100", not "00" */
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
if (!s) {
s = "<NULL>";
}
- length = acpi_ut_bound_string_length(s, precision);
+ length = (s32)acpi_ut_bound_string_length(s, precision);
if (!(type & ACPI_FORMAT_LEFT)) {
while (length < width--) {
pos =
@@ -579,7 +579,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
}
}
- return (ACPI_PTR_DIFF(pos, string));
+ return ((int)ACPI_PTR_DIFF(pos, string));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 70f78a4bf13b..f9801d13547f 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -237,6 +237,13 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
+ /*
+ * Don't attempt to perform any validation on the 2nd byte.
+ * Although all known ASL compilers insert a zero for the 2nd
+ * byte, it can also be a checksum (as per the ACPI spec),
+ * and this is occasionally seen in the field. July 2017.
+ */
+
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 64308c304ade..eafabcd2fada 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -226,7 +226,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index)
+ u32 index)
{
union acpi_generic_state *state;
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index f42be01d99fd..9633ee142855 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -276,8 +276,8 @@ static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
/* Convert and insert (add) the decimal digit */
- next_value =
- (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO);
+ acpi_ut_short_multiply(return_value, 10, &next_value);
+ next_value += (ascii_digit - ACPI_ASCII_ZERO);
/* Check for overflow (32 or 64 bit) - return current converted value */
@@ -335,9 +335,8 @@ static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
/* Convert and insert the hex digit */
- return_value =
- (return_value << 4) |
- acpi_ut_ascii_char_to_hex(ascii_digit);
+ acpi_ut_short_shift_left(return_value, 4, &return_value);
+ return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
string++;
valid_digits++;
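Routing the decimal multiply and the hex shift through the new helpers keeps utstrtoul64.c buildable where native 64-bit multiply is unavailable. The hex accumulation, written standalone with plain operators for illustration (input validation omitted):

    #include <stdint.h>

    /* Shift in 4 bits per hex digit: "1A" -> 0x1A */
    static uint64_t hex_accumulate(const char *s)
    {
            uint64_t v = 0;

            for (; *s; s++) {
                    uint64_t d = (*s <= '9') ? (uint64_t)(*s - '0')
                                : (uint64_t)((*s | 0x20) - 'a' + 10);

                    v = (v << 4) | d;       /* the patch does this via
                                             * acpi_ut_short_shift_left() */
            }
            return v;
    }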
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 9a07a42cae34..3c8de88ecbd5 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -591,6 +591,10 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
return_VOID;
}
+ if (!acpi_gbl_global_list) {
+ goto exit;
+ }
+
element = acpi_gbl_global_list->list_head;
while (element) {
if ((element->component & component) &&
@@ -602,7 +606,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (element->size <
sizeof(struct acpi_common_descriptor)) {
- acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u "
"[Not a Descriptor - too small]\n",
descriptor, element->size,
element->module, element->line);
@@ -612,7 +616,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
ACPI_DESC_TYPE_CACHED) {
acpi_os_printf
- ("%p Length 0x%04X %9.9s-%u [%s] ",
+ ("%p Length 0x%04X %9.9s-%4.4u [%s] ",
descriptor, element->size,
element->module, element->line,
acpi_ut_get_descriptor_name
@@ -705,6 +709,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
element = element->next;
}
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
/* Print summary */
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 6e9f14c0a71b..cb4126051f62 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -120,11 +120,6 @@ int apei_exec_collect_resources(struct apei_exec_context *ctx,
struct dentry;
struct dentry *apei_get_debugfs_dir(void);
-#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
- (void *)section - (void *)estatus < estatus->data_length; \
- section = (void *)(section+1) + section->error_data_length)
-
static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index ec50c32ea3da..b38737c83a24 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -281,7 +281,7 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
for (i = 0; i < trigger_tab->entry_count; i++) {
if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
- entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY &&
(entry->register_region.address & param2) == (param1 & param2))
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d661d452b238..077f9bad6f44 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1157,7 +1157,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
generic->header.source_id);
goto err_edac_unreg;
}
- rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
+ "GHES IRQ", ghes);
if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
@@ -1265,9 +1266,14 @@ static int __init ghes_init(void)
if (acpi_disabled)
return -ENODEV;
- if (hest_disable) {
+ switch (hest_disable) {
+ case HEST_NOT_FOUND:
+ return -ENODEV;
+ case HEST_DISABLED:
pr_info(GHES_PFX "HEST is not enabled!\n");
return -EINVAL;
+ default:
+ break;
}
if (ghes_disable) {
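hest_disable becomes a tri-state so ghes_init() can distinguish a missing HEST (quiet -ENODEV) from a user- or error-disabled one (logged -EINVAL). The assumed shape of the tri-state (the enum itself is defined in a header outside this diff):

    enum hest_status {
            HEST_ENABLED,           /* table found and parsed */
            HEST_DISABLED,          /* boot option or parse failure */
            HEST_NOT_FOUND,         /* no HEST provided by firmware */
    };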
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 456b488eb1df..9cb74115a43d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -37,7 +37,7 @@
#define HEST_PFX "HEST: "
-bool hest_disable;
+int hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
@@ -213,7 +213,7 @@ err:
static int __init setup_hest_disable(char *str)
{
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
return 0;
}
@@ -232,9 +232,10 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND)
- goto err;
- else if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ hest_disable = HEST_NOT_FOUND;
+ return;
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
@@ -257,5 +258,5 @@ void __init acpi_hest_init(void)
pr_info(HEST_PFX "Table parsing has been initialized.\n");
return;
err:
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index a3215ee671c1..9565d572f8dd 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -588,7 +588,8 @@ void acpi_configure_pmsi_domain(struct device *dev)
dev_set_msi_domain(dev, msi_domain);
}
-static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
+ void *data)
{
u32 *rid = data;
@@ -633,8 +634,7 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
int err = 0;
- if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
- !dev->iommu_group)
+ if (ops->add_device && dev->bus && !dev->iommu_group)
err = ops->add_device(dev);
return err;
@@ -648,45 +648,81 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif
-static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
- struct acpi_iort_node *node,
- u32 streamid)
+static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
+ u32 streamid)
{
- const struct iommu_ops *ops = NULL;
- int ret = -ENODEV;
+ const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
- if (node) {
- iort_fwnode = iort_get_fwnode(node);
- if (!iort_fwnode)
- return NULL;
+ if (!node)
+ return -ENODEV;
- ops = iommu_ops_from_fwnode(iort_fwnode);
- /*
- * If the ops look-up fails, this means that either
- * the SMMU drivers have not been probed yet or that
- * the SMMU drivers are not built in the kernel;
- * Depending on whether the SMMU drivers are built-in
- * in the kernel or not, defer the IOMMU configuration
- * or just abort it.
- */
- if (!ops)
- return iort_iommu_driver_enabled(node->type) ?
- ERR_PTR(-EPROBE_DEFER) : NULL;
+ iort_fwnode = iort_get_fwnode(node);
+ if (!iort_fwnode)
+ return -ENODEV;
- ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
- }
+ /*
+ * If the ops look-up fails, this means that either
+ * the SMMU drivers have not been probed yet or that
+ * the SMMU drivers are not built in the kernel;
+ * Depending on whether the SMMU drivers are built-in
+ * in the kernel or not, defer the IOMMU configuration
+ * or just abort it.
+ */
+ ops = iommu_ops_from_fwnode(iort_fwnode);
+ if (!ops)
+ return iort_iommu_driver_enabled(node->type) ?
+ -EPROBE_DEFER : -ENODEV;
+
+ return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+}
+
+struct iort_pci_alias_info {
+ struct device *dev;
+ struct acpi_iort_node *node;
+};
+
+static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct iort_pci_alias_info *info = data;
+ struct acpi_iort_node *parent;
+ u32 streamid;
- return ret ? NULL : ops;
+ parent = iort_node_map_id(info->node, alias, &streamid,
+ IORT_IOMMU_TYPE);
+ return iort_iommu_xlate(info->dev, parent, streamid);
+}
+
+static int nc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_named_component *ncomp;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return -ENODEV;
+
+ ncomp = (struct acpi_iort_named_component *)node->node_data;
+
+ *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL << ncomp->memory_address_limit;
+
+ return 0;
}
/**
- * iort_set_dma_mask - Set-up dma mask for a device.
+ * iort_dma_setup() - Set-up device DMA parameters.
*
* @dev: device to configure
+ * @dma_addr: device DMA address result pointer
+ * @size: DMA range size result pointer
*/
-void iort_set_dma_mask(struct device *dev)
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
+ u64 mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret, msb;
+
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* setup the correct supported mask.
@@ -700,6 +736,36 @@ void iort_set_dma_mask(struct device *dev)
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
+
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+
+ if (dev_is_pci(dev))
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ else
+ ret = nc_dma_get_range(dev, &size);
+
+ if (!ret) {
+ msb = fls64(dmaaddr + size - 1);
+ /*
+ * Round-up to the power-of-two mask or set
+ * the mask to the whole 64-bit address space
+ * in case the DMA region covers the full
+ * memory window.
+ */
+ mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
+ /*
+ * Limit coherent and dma mask based on size
+ * retrieved from firmware.
+ */
+ dev->coherent_dma_mask = mask;
+ *dev->dma_mask = mask;
+ }
+
+ *dma_addr = dmaaddr;
+ *dma_size = size;
+
+ dev->dma_pfn_offset = PFN_DOWN(offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
/**
@@ -713,9 +779,9 @@ void iort_set_dma_mask(struct device *dev)
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
- const struct iommu_ops *ops = NULL;
+ const struct iommu_ops *ops;
u32 streamid = 0;
- int err;
+ int err = -ENODEV;
/*
* If we already translated the fwspec there
@@ -727,21 +793,16 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
- u32 rid;
-
- pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
- &rid);
+ struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
- parent = iort_node_map_id(node, rid, &streamid,
- IORT_IOMMU_TYPE);
-
- ops = iort_iommu_xlate(dev, parent, streamid);
-
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ iort_pci_iommu_init, &info);
} else {
int i = 0;
@@ -750,31 +811,30 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_map_platform_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
-
- while (parent) {
- ops = iort_iommu_xlate(dev, parent, streamid);
- if (IS_ERR_OR_NULL(ops))
- return ops;
-
+ do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
- }
+
+ if (parent)
+ err = iort_iommu_xlate(dev, parent, streamid);
+ } while (parent && !err);
}
/*
* If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order.
*/
- err = iort_add_device_replay(ops, dev);
- if (err)
- ops = ERR_PTR(err);
+ if (!err) {
+ ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+ err = iort_add_device_replay(ops, dev);
+ }
/* Ignore all other errors apart from EPROBE_DEFER */
- if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
- dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+ if (err == -EPROBE_DEFER) {
+ ops = ERR_PTR(err);
+ } else if (err) {
+ dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
@@ -908,6 +968,27 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
+#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
+/*
+ * Set the NUMA proximity domain for an SMMUv3 device.
+ */
+static void __init arm_smmu_v3_set_proximity(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
+ set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
+ smmu->base_address,
+ smmu->pxm);
+ }
+}
+#else
+#define arm_smmu_v3_set_proximity NULL
+#endif
+
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
@@ -977,13 +1058,16 @@ struct iort_iommu_config {
int (*iommu_count_resources)(struct acpi_iort_node *node);
void (*iommu_init_resources)(struct resource *res,
struct acpi_iort_node *node);
+ void (*iommu_set_proximity)(struct device *dev,
+ struct acpi_iort_node *node);
};
static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.iommu_is_coherent = arm_smmu_v3_is_coherent,
.iommu_count_resources = arm_smmu_v3_count_resources,
- .iommu_init_resources = arm_smmu_v3_init_resources
+ .iommu_init_resources = arm_smmu_v3_init_resources,
+ .iommu_set_proximity = arm_smmu_v3_set_proximity,
};
static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
@@ -1028,6 +1112,9 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
if (!pdev)
return -ENOMEM;
+ if (ops->iommu_set_proximity)
+ ops->iommu_set_proximity(&pdev->dev, node);
+
count = ops->iommu_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
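The mask computation in iort_dma_setup() rounds the top of the firmware-reported DMA window up to a power-of-two mask. A worked example with assumed values:

    #include <linux/bitops.h>       /* fls64() */

    static u64 window_to_mask(u64 dmaaddr, u64 size)
    {
            int msb = fls64(dmaaddr + size - 1);

            /* e.g. dmaaddr = 0, size = 0x80000000 -> msb = 31,
             * mask = 0x7fffffff (a 31-bit DMA mask); msb == 64
             * means the window covers the whole address space.
             */
            return msb == 64 ? U64_MAX : (1ULL << msb) - 1;
    }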
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1cbb88d938e5..13e7b56e33ae 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -620,7 +620,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bb542acc0574..995c4d8922b1 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,30 +30,13 @@
#include "internal.h"
-enum acpi_blacklist_predicates {
- all_versions,
- less_than_or_equal,
- equal,
- greater_than_or_equal,
-};
-
-struct acpi_blacklist_item {
- char oem_id[7];
- char oem_table_id[9];
- u32 oem_revision;
- char *table;
- enum acpi_blacklist_predicates oem_revision_predicate;
- char *reason;
- u32 is_critical_error;
-};
-
-static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
+static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
* If they are critical errors, mark it critical, and abort driver load.
*/
-static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
+static struct acpi_platform_list acpi_blacklist[] __initdata = {
/* Compaq Presario 1700 */
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
"Multiple problems", 1},
@@ -67,65 +50,27 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
- {""}
+ { }
};
int __init acpi_blacklisted(void)
{
- int i = 0;
+ int i;
int blacklisted = 0;
- struct acpi_table_header table_header;
-
- while (acpi_blacklist[i].oem_id[0] != '\0') {
- if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
- i++;
- continue;
- }
-
- if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
- i++;
- continue;
- }
-
- if (strncmp
- (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
- 8)) {
- i++;
- continue;
- }
-
- if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
- || (acpi_blacklist[i].oem_revision_predicate ==
- less_than_or_equal
- && table_header.oem_revision <=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate ==
- greater_than_or_equal
- && table_header.oem_revision >=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate == equal
- && table_header.oem_revision ==
- acpi_blacklist[i].oem_revision)) {
- printk(KERN_ERR PREFIX
- "Vendor \"%6.6s\" System \"%8.8s\" "
- "Revision 0x%x has a known ACPI BIOS problem.\n",
- acpi_blacklist[i].oem_id,
- acpi_blacklist[i].oem_table_id,
- acpi_blacklist[i].oem_revision);
+ i = acpi_match_platform_list(acpi_blacklist);
+ if (i >= 0) {
+ pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
+ acpi_blacklist[i].oem_id,
+ acpi_blacklist[i].oem_table_id,
+ acpi_blacklist[i].oem_revision);
- printk(KERN_ERR PREFIX
- "Reason: %s. This is a %s error\n",
- acpi_blacklist[i].reason,
- (acpi_blacklist[i].
- is_critical_error ? "non-recoverable" :
- "recoverable"));
+ pr_err(PREFIX "Reason: %s. This is a %s error\n",
+ acpi_blacklist[i].reason,
+ (acpi_blacklist[i].data ?
+ "non-recoverable" : "recoverable"));
- blacklisted = acpi_blacklist[i].is_critical_error;
- break;
- } else {
- i++;
- }
+ blacklisted = acpi_blacklist[i].data;
}
(void)early_acpi_osi_init();
@@ -144,7 +89,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
#endif
-static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = {
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/*
* DELL XPS 13 (2015) switches sound between HDA and I2S
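acpi_match_platform_list() replaces the open-coded OEM/revision matching; it returns the index of the first matching entry, or a negative value when nothing matches. A hedged sketch of a minimal list, reusing the field order visible in the entries above:

    static struct acpi_platform_list my_list[] __initdata = {
            /* oem_id, oem_table_id, oem_revision, table signature,
             * revision predicate, reason, driver data
             */
            {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT,
             less_than_or_equal, "Multiple problems", 1},
            { }                     /* terminator */
    };

    /* idx = acpi_match_platform_list(my_list);  idx < 0 if no match */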
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index af74b420ec83..4d0979e02a28 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
}
#endif
-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
{}
};
#else
-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#endif
@@ -995,9 +995,6 @@ void __init acpi_early_init(void)
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
- /* It's safe to verify table checksums during late stage */
- acpi_gbl_verify_table_checksum = TRUE;
-
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 2ed6935d4483..fbcc73f7a099 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -401,6 +401,8 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
+ acpi_handle_debug(handle, "Wake notify\n");
+
adev = acpi_bus_get_acpi_device(handle);
if (!adev)
return;
@@ -409,8 +411,12 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
- if (adev->wakeup.context.func)
+ if (adev->wakeup.context.func) {
+ acpi_handle_debug(handle, "Running %pF for %s\n",
+ adev->wakeup.context.func,
+ dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
+ }
}
mutex_unlock(&acpi_pm_notifier_lock);
@@ -682,55 +688,88 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
}
}
+static DEFINE_MUTEX(acpi_wakeup_lock);
+
+static int __acpi_device_wakeup_enable(struct acpi_device *adev,
+ u32 target_state, int max_count)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+ acpi_status status;
+ int error = 0;
+
+ mutex_lock(&acpi_wakeup_lock);
+
+ if (wakeup->enable_count >= max_count)
+ goto out;
+
+ if (wakeup->enable_count > 0)
+ goto inc;
+
+ error = acpi_enable_wakeup_device_power(adev, target_state);
+ if (error)
+ goto out;
+
+ status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(status)) {
+ acpi_disable_wakeup_device_power(adev);
+ error = -EIO;
+ goto out;
+ }
+
+inc:
+ wakeup->enable_count++;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
+ return error;
+}
+
/**
- * acpi_device_wakeup - Enable/disable wakeup functionality for device.
- * @adev: ACPI device to enable/disable wakeup functionality for.
+ * acpi_device_wakeup_enable - Enable wakeup functionality for device.
+ * @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
- * @enable: Whether to enable or disable the wakeup functionality.
*
- * Enable/disable the GPE associated with @adev so that it can generate
- * wakeup signals for the device in response to external (remote) events and
- * enable/disable device wakeup power.
+ * Enable the GPE associated with @adev so that it can generate wakeup signals
+ * for the device in response to external (remote) events and enable wakeup
+ * power for it.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
+{
+ return __acpi_device_wakeup_enable(adev, target_state, 1);
+}
+
+/**
+ * acpi_device_wakeup_disable - Disable wakeup functionality for device.
+ * @adev: ACPI device to disable wakeup functionality for.
+ *
+ * Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
- bool enable)
+static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
- if (enable) {
- acpi_status res;
- int error;
+ mutex_lock(&acpi_wakeup_lock);
- if (adev->wakeup.flags.enabled)
- return 0;
+ if (!wakeup->enable_count)
+ goto out;
- error = acpi_enable_wakeup_device_power(adev, target_state);
- if (error)
- return error;
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
- res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
- acpi_disable_wakeup_device_power(adev);
- return -EIO;
- }
- adev->wakeup.flags.enabled = 1;
- } else if (adev->wakeup.flags.enabled) {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- acpi_disable_wakeup_device_power(adev);
- adev->wakeup.flags.enabled = 0;
- }
- return 0;
+ wakeup->enable_count--;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
}
-/**
- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
+ int max_count)
{
struct acpi_device *adev;
int error;
@@ -744,13 +783,41 @@ int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
- error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
+ if (!enable) {
+ acpi_device_wakeup_disable(adev);
+ dev_dbg(dev, "Wakeup disabled by ACPI\n");
+ return 0;
+ }
+
+ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
+ max_count);
if (!error)
- dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled");
+ dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
-EXPORT_SYMBOL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, 1);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
+ * @dev: Bridge device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
@@ -800,13 +867,15 @@ int acpi_dev_runtime_suspend(struct device *dev)
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
- if (remote_wakeup && error)
- return -EAGAIN;
+ if (remote_wakeup) {
+ error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (error)
+ return -EAGAIN;
+ }
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ if (error && remote_wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -829,7 +898,7 @@ int acpi_dev_runtime_resume(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -884,13 +953,15 @@ int acpi_dev_suspend_late(struct device *dev)
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- error = acpi_device_wakeup(adev, target_state, wakeup);
- if (wakeup && error)
- return error;
+ if (wakeup) {
+ error = acpi_device_wakeup_enable(adev, target_state);
+ if (error)
+ return error;
+ }
error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ if (error && wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -913,7 +984,7 @@ int acpi_dev_resume_early(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1056,7 +1127,7 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
@@ -1100,7 +1171,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
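
Replacing the boolean wakeup.flags.enabled with a mutex-protected enable_count lets enable/disable calls nest, and the bridge variant raises the cap to INT_MAX because one bridge may be asked to arm wakeup once per device behind it. A hedged usage sketch (the driver is hypothetical; acpi_pm_set_bridge_wakeup() is the export added above, and acpi_enable_gpe()/acpi_disable_gpe() are themselves reference counted inside ACPICA, so the GPE is only disarmed on the last disable):

static int example_bridge_suspend(struct device *dev)
{
	int ret;

	/*
	 * May run once per child that needs remote wakeup; each call
	 * takes one reference on the shared wakeup GPE.
	 */
	ret = acpi_pm_set_bridge_wakeup(dev, true);
	if (ret)
		return ret;

	/* ... put the bridge itself into a low-power state ... */
	return 0;
}

static int example_bridge_resume(struct device *dev)
{
	/* Drops one reference; the GPE is disarmed on the last one. */
	return acpi_pm_set_bridge_wakeup(dev, false);
}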
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 0c00208b423e..2305e1ab978e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -585,7 +585,7 @@ static struct attribute *dock_attributes[] = {
NULL
};
-static struct attribute_group dock_attribute_group = {
+static const struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ae3d6d152633..236b14324780 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -112,8 +112,7 @@ enum {
EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
- * current command processing */
+ EC_FLAGS_GPE_MASKED, /* GPE masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -425,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
wake_up(&ec->wait);
}
-static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_mask_gpe(struct acpi_ec *ec)
{
- if (!test_bit(flag, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
acpi_ec_disable_gpe(ec, false);
ec_dbg_drv("Polling enabled");
- set_bit(flag, &ec->flags);
+ set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
}
}
-static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
{
- if (test_bit(flag, &ec->flags)) {
- clear_bit(flag, &ec->flags);
+ if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
acpi_ec_enable_gpe(ec, false);
ec_dbg_drv("Polling disabled");
}
@@ -464,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -480,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -700,7 +699,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
}
}
out:
@@ -798,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1586,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec)
{
if (!boot_ec)
return false;
- if (ec->handle == boot_ec->handle &&
- ec->gpe == boot_ec->gpe &&
- ec->command_addr == boot_ec->command_addr &&
+ if (ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr)
return true;
return false;
@@ -1613,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device)
if (acpi_is_boot_ec(ec)) {
boot_ec_is_ecdt = false;
+ /*
+ * Trust PNP0C09 namespace location rather than ECDT ID.
+ *
+ * But trust ECDT GPE rather than _GPE because of ASUS quirks,
+ * so do not change boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
@@ -1747,18 +1751,20 @@ static int __init acpi_ec_ecdt_start(void)
if (!boot_ec)
return -ENODEV;
- /*
- * The DSDT EC should have already been started in
- * acpi_ec_add().
- */
+ /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
if (!boot_ec_is_ecdt)
return -ENODEV;
/*
* At this point, the namespace and the GPE are initialized, so
* start looking up the namespace objects and handling the events.
+ *
+ * Note: ec->handle can be valid if this function is called after
+ * acpi_ec_add(), hence the fast path.
*/
- if (!acpi_ec_ecdt_get_handle(&handle))
+ if (boot_ec->handle != ACPI_ROOT_OBJECT)
+ handle = boot_ec->handle;
+ else if (!acpi_ec_ecdt_get_handle(&handle))
return -ENODEV;
return acpi_config_boot_ec(boot_ec, handle, true, true);
}
@@ -1803,7 +1809,7 @@ static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id ec_dmi_table[] __initdata = {
+static const struct dmi_system_id ec_dmi_table[] __initconst = {
{
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
@@ -2011,8 +2017,8 @@ int __init acpi_ec_init(void)
return result;
/* Drivers must be started after acpi_ec_query_init() */
- ecdt_fail = acpi_ec_ecdt_start();
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ ecdt_fail = acpi_ec_ecdt_start();
return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3f5af4d7a739..4361c4415b4f 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -232,6 +232,12 @@ static inline void suspend_nvs_restore(void) {}
void acpi_init_properties(struct acpi_device *adev);
void acpi_free_properties(struct acpi_device *adev);
+#ifdef CONFIG_X86
+void acpi_extract_apple_properties(struct acpi_device *adev);
+#else
+static inline void acpi_extract_apple_properties(struct acpi_device *adev) {}
+#endif
+
/*--------------------------------------------------------------------------
Watchdog
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
index 6d3351452ea2..929ba4da0b30 100644
--- a/drivers/acpi/nfit/Kconfig
+++ b/drivers/acpi/nfit/Kconfig
@@ -2,7 +2,7 @@ config ACPI_NFIT
tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
depends on PHYS_ADDR_T_64BIT
depends on BLK_DEV
- depends on ARCH_HAS_MMIO_FLUSH
+ depends on ARCH_HAS_PMEM_API
select LIBNVDIMM
help
Infrastructure to probe ACPI 6 compliant platforms for
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 19182d091587..9c2c49b6a240 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -228,6 +228,10 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd == ND_CMD_CALL) {
call_pkg = buf;
func = call_pkg->nd_command;
+
+ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+ if (call_pkg->nd_reserved2[i])
+ return -EINVAL;
}
if (nvdimm) {
@@ -1674,8 +1678,19 @@ static ssize_t range_index_show(struct device *dev,
}
static DEVICE_ATTR_RO(range_index);
+static ssize_t ecc_unit_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
+
+ return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
+}
+static DEVICE_ATTR_RO(ecc_unit_size);
+
static struct attribute *acpi_nfit_region_attributes[] = {
&dev_attr_range_index.attr,
+ &dev_attr_ecc_unit_size.attr,
NULL,
};
@@ -1804,6 +1819,7 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
spa->range_index, i);
+ struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
if (!memdev || !nfit_mem->dcr) {
dev_err(dev, "%s: failed to find DCR\n", __func__);
@@ -1811,13 +1827,13 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
map->region_offset = memdev->region_offset;
- map->serial_number = nfit_mem->dcr->serial_number;
+ map->serial_number = dcr->serial_number;
map2->region_offset = memdev->region_offset;
- map2->serial_number = nfit_mem->dcr->serial_number;
- map2->vendor_id = nfit_mem->dcr->vendor_id;
- map2->manufacturing_date = nfit_mem->dcr->manufacturing_date;
- map2->manufacturing_location = nfit_mem->dcr->manufacturing_location;
+ map2->serial_number = dcr->serial_number;
+ map2->vendor_id = dcr->vendor_id;
+ map2->manufacturing_date = dcr->manufacturing_date;
+ map2->manufacturing_location = dcr->manufacturing_location;
}
/* v1.1 namespaces */
@@ -1835,6 +1851,28 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
cmp_map_compat, NULL);
nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ /* record the result of the sort for the mapping position */
+ for (i = 0; i < nr; i++) {
+ struct nfit_set_info_map2 *map2 = &info2->mapping[i];
+ int j;
+
+ for (j = 0; j < nr; j++) {
+ struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
+ struct nvdimm *nvdimm = mapping->nvdimm;
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
+
+ if (map2->serial_number == dcr->serial_number &&
+ map2->vendor_id == dcr->vendor_id &&
+ map2->manufacturing_date == dcr->manufacturing_date &&
+ map2->manufacturing_location
+ == dcr->manufacturing_location) {
+ mapping->position = i;
+ break;
+ }
+ }
+ }
+
ndr_desc->nd_set = nd_set;
devm_kfree(dev, info);
devm_kfree(dev, info2);
@@ -1930,7 +1968,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
else {
if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
- mmio_flush_range((void __force *)
+ arch_invalidate_pmem((void __force *)
mmio->addr.aperture + offset, c);
memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
@@ -2884,7 +2922,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
* need to be interruptible while waiting.
*/
INIT_WORK_ONSTACK(&flush.work, flush_probe);
- COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
+ init_completion(&flush.cmp);
queue_work(nfit_wq, &flush.work);
mutex_unlock(&acpi_desc->init_mutex);
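
The one-line flush_probe change above is a correctness fix rather than churn: COMPLETION_INITIALIZER_ONSTACK() is an initializer macro, meant to appear only on the right-hand side of a declaration, and using it as a standalone statement only happens to work with its current statement-expression implementation. init_completion() is the supported way to set up a completion that already exists. A sketch of the forms:

/* Valid: initializer at declaration time. */
struct completion done = COMPLETION_INITIALIZER_ONSTACK(done);

/* Valid: explicit initialization of an existing field, as above. */
init_completion(&flush.cmp);

/* Dubious: initializer macro used as a statement (the old code). */
COMPLETION_INITIALIZER_ONSTACK(flush.cmp);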
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 723bee58bbcf..76998a51bf99 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "internal.h"
@@ -257,12 +258,11 @@ bool acpi_osi_is_win8(void)
}
EXPORT_SYMBOL(acpi_osi_is_win8);
-static void __init acpi_osi_dmi_darwin(bool enable,
- const struct dmi_system_id *d)
+static void __init acpi_osi_dmi_darwin(void)
{
- pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n");
osi_config.darwin_dmi = 1;
- __acpi_osi_setup_darwin(enable);
+ __acpi_osi_setup_darwin(true);
}
static void __init acpi_osi_dmi_linux(bool enable,
@@ -273,13 +273,6 @@ static void __init acpi_osi_dmi_linux(bool enable,
__acpi_osi_setup_linux(enable);
}
-static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
-{
- acpi_osi_dmi_darwin(true, d);
-
- return 0;
-}
-
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
acpi_osi_dmi_linux(true, d);
@@ -319,7 +312,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
* Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
* by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
*/
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
{
.callback = dmi_disable_osi_vista,
.ident = "Fujitsu Siemens",
@@ -481,30 +474,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
-
- /*
- * Enable _OSI("Darwin") for all apple platforms.
- */
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- },
- },
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- },
- },
{}
};
static __init void acpi_osi_dmi_blacklisted(void)
{
dmi_check_system(acpi_osi_dmi_table);
+
+ /* Enable _OSI("Darwin") for Apple platforms. */
+ if (x86_apple_machine)
+ acpi_osi_dmi_darwin();
}
int __init early_acpi_osi_init(void)
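
osi.c here, and pci_root.c, sbs.c and scan.c below, all replace per-driver Apple DMI matches with the shared x86_apple_machine flag from <linux/platform_data/x86/apple.h>. From memory, the header amounts to roughly the sketch below; the flag is set once from DMI early in x86 boot and covers both the "Apple Inc." and "Apple Computer, Inc." vendor strings, which is why the two table entries removed above collapse into a single test:

#ifdef CONFIG_X86
extern bool x86_apple_machine;
#else
#define x86_apple_machine false
#endif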
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 9eec3095e6c3..6fc204a52493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -431,8 +432,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
* been called successfully. We know the feature set supported by the
* platform, so avoid calling _OSC at all
*/
-
- if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
+ if (x86_apple_machine) {
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
decode_osc_control(root, "OS assumes control of",
root->osc_control_set);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index f62c68e24317..e90b61f7d2db 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
/*
* Fujitsu Primequest machines will return 1023 to indicate an
* error if the _SUN method is evaluated on SxFy objects that
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 3b7d5be5b7ed..6c99d3f81095 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,6 +27,9 @@
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
+#define AXP288_ADC_TS_PIN_GPADC 0xf2
+#define AXP288_ADC_TS_PIN_ON 0xf3
+
static struct pmic_table power_table[] = {
{
.address = 0x00,
@@ -209,11 +212,23 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
u8 buf[2];
+ int ret;
- if (regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2))
- return -EIO;
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_PIN_GPADC);
+ if (ret)
+ return ret;
+
+ /* After switching to the GPADC pin give things some time to settle */
+ usleep_range(6000, 10000);
+
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ if (ret == 0)
+ ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
+
+ regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
- return (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
+ return ret;
}
static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
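
The raw-temperature read now brackets the ADC access with TS-pin mode switches and gives the multiplexer time to settle before sampling. The value assembled at the end is a 12-bit sample; the register layout below is an assumption about the AXP288 GPADC (bits 11..4 in the high register, bits 3..0 in the upper nibble of the low register) inferred from the shifts in the hunk:

/* Assumed AXP288 GPADC layout; see the shifts in the hunk above. */
u16 raw = (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);

Note also that the return value of the final regmap_write() restoring AXP288_ADC_TS_PIN_ON is deliberately ignored, so a successful conversion result is not overwritten by a failed restore.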
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 591d1dd3f04e..9d6aff22684e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device)
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
- dev_warn(&device->dev, "CPPC data invalid or not present\n");
+ dev_dbg(&device->dev, "CPPC data invalid or not present\n");
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5c8aa9cf62d7..2736e25e9dc6 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -48,6 +48,8 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
+#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
+
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
@@ -708,8 +710,6 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
{
- acpi_unlazy_tlb(smp_processor_id());
-
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -761,7 +761,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
if (cx->type != ACPI_STATE_C1) {
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
- index = CPUIDLE_DRIVER_STATE_START;
+ index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
@@ -789,7 +789,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -813,7 +813,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
if (max_cstate == 0)
@@ -840,7 +840,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
@@ -848,6 +848,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (max_cstate == 0)
max_cstate = 1;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
+ cpuidle_poll_state_init(drv);
+ count = 1;
+ } else {
+ count = 0;
+ }
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
@@ -867,14 +874,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->safe_state_index = count;
}
/*
- * Halt-induced C1 is not good for ->enter_freeze, because it
+ * Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
- state->enter_freeze = acpi_idle_enter_freeze;
+ state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)
@@ -1291,7 +1298,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return -EINVAL;
drv->safe_state_index = -1;
- for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
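
CPUIDLE_DRIVER_STATE_START appears to be on its way out of the cpuidle core, so the driver now records for itself whether slot 0 is taken by the polling state; that is all ACPI_IDLE_STATE_START encodes. The indexing contract the hunks above rely on, condensed into one sketch:

if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
	cpuidle_poll_state_init(drv);	/* installs drv->states[0] */
	count = 1;			/* ACPI C-states start at 1 */
} else {
	count = 0;			/* no poll state, start at 0 */
}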
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7cfbda4d7c51..74f738cb6073 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
{
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..c1c216163de3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -19,21 +19,19 @@
#include "internal.h"
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const u8 prp_uuid[16] = {
- 0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
-};
-/* ACPI _DSD data subnodes UUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
-static const u8 ads_uuid[16] = {
- 0xe6, 0xe3, 0xb8, 0xdb, 0x86, 0x58, 0xa6, 0x4b,
- 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b
-};
+/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
+static const guid_t prp_guid =
+ GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+static const guid_t ads_guid =
+ GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
+ 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
const union acpi_object *desc,
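
The GUID_INIT() constants are byte-for-byte identical to the little-endian byte arrays they replace: guid_t stores its first three fields little-endian, which matches how _DSD serializes the 16-byte UUID buffer, so guid_equal() can compare directly against the raw buffer pointer. A quick equivalence check (sketch):

/* daffd814-6eba-4d8c-8a91-bc9bbf4aa301 as _DSD hands it over. */
static const u8 raw[16] = {
	0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
	0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
};

static const guid_t prp = GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
	0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);

/* Holds: both spellings produce the same 16 bytes. */
WARN_ON(!guid_equal((const guid_t *)raw, &prp));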
@@ -56,8 +54,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
return false;
dn->name = link->package.elements[0].string.pointer;
- dn->fwnode.type = FWNODE_ACPI_DATA;
- dn->fwnode.ops = &acpi_fwnode_ops;
+ dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
INIT_LIST_HEAD(&dn->data.subnodes);
@@ -190,22 +187,23 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
{
int i;
- /* Look for the ACPI data subnodes UUID. */
+ /* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *links;
+ const union acpi_object *guid, *links;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || links->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ links->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, ads_uuid, sizeof(ads_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid))
continue;
return acpi_add_nondev_subnodes(scope, links, &data->subnodes,
@@ -298,26 +296,27 @@ static bool acpi_extract_properties(const union acpi_object *desc,
if (desc->package.count % 2)
return false;
- /* Look for the device properties UUID. */
+ /* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *properties;
+ const union acpi_object *guid, *properties;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || properties->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ properties->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, prp_uuid, sizeof(prp_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
continue;
/*
- * We found the matching UUID. Now validate the format of the
+ * We found the matching GUID. Now validate the format of the
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
@@ -339,6 +338,9 @@ void acpi_init_properties(struct acpi_device *adev)
INIT_LIST_HEAD(&adev->data.subnodes);
+ if (!adev->handle)
+ return;
+
/*
* Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
* Device Tree compatible properties for this device.
@@ -373,6 +375,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
+
+ if (!adev->data.pointer)
+ acpi_extract_apple_properties(adev);
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
@@ -418,7 +423,7 @@ void acpi_free_properties(struct acpi_device *adev)
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property value type doesn't match @type.
*/
-static int acpi_data_get_property(struct acpi_device_data *data,
+static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
@@ -460,20 +465,21 @@ static int acpi_data_get_property(struct acpi_device_data *data,
* @type: Expected property type.
* @obj: Location to store the property value (if not %NULL).
*/
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj)
{
return adev ? acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property);
-static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *fwnode)
+static const struct acpi_device_data *
+acpi_device_data_of_node(const struct fwnode_handle *fwnode)
{
- if (fwnode->type == FWNODE_ACPI) {
- struct acpi_device *adev = to_acpi_device_node(fwnode);
+ if (is_acpi_device_node(fwnode)) {
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
- } else if (fwnode->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+ } else if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
return NULL;
@@ -485,8 +491,8 @@ static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *f
* @propname: Name of the property.
* @valptr: Location to store a pointer to the property value (if not %NULL).
*/
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
- void **valptr)
+int acpi_node_prop_get(const struct fwnode_handle *fwnode,
+ const char *propname, void **valptr)
{
return acpi_data_get_property(acpi_device_data_of_node(fwnode),
propname, ACPI_TYPE_ANY,
@@ -512,7 +518,7 @@ int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
* %-EPROTO if the property is not a package or the type of its elements
* doesn't match @type.
*/
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj)
@@ -572,13 +578,13 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
*
* Return: %0 on success, negative error code on failure.
*/
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
- struct acpi_device_data *data;
+ const struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
@@ -674,7 +680,7 @@ int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
-static int acpi_data_prop_read_single(struct acpi_device_data *data,
+static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype, void *val)
{
@@ -813,7 +819,7 @@ static int acpi_copy_property_array_string(const union acpi_object *items,
return nval;
}
-static int acpi_data_prop_read(struct acpi_device_data *data,
+static int acpi_data_prop_read(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -867,7 +873,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval)
{
return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL;
@@ -885,8 +891,9 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval)
+int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
@@ -897,13 +904,15 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct list_head *head, *next;
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_device *child_adev = NULL;
+ const struct list_head *head;
+ struct list_head *next;
- if (!child || child->type == FWNODE_ACPI) {
+ if (!child || is_acpi_device_node(child)) {
if (adev)
head = &adev->children;
else
@@ -913,26 +922,27 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
goto nondev;
if (child) {
- adev = to_acpi_device_node(child);
- next = adev->node.next;
+ child_adev = to_acpi_device_node(child);
+ next = child_adev->node.next;
if (next == head) {
child = NULL;
goto nondev;
}
- adev = list_entry(next, struct acpi_device, node);
+ child_adev = list_entry(next, struct acpi_device, node);
} else {
- adev = list_first_entry(head, struct acpi_device, node);
+ child_adev = list_first_entry(head, struct acpi_device,
+ node);
}
- return acpi_fwnode_handle(adev);
+ return acpi_fwnode_handle(child_adev);
}
nondev:
- if (!child || child->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ if (!child || is_acpi_data_node(child)) {
+ const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
- if (adev)
- head = &adev->data.subnodes;
+ if (child_adev)
+ head = &child_adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
@@ -963,7 +973,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
@@ -992,8 +1002,8 @@ struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
* %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
* of success the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
+struct fwnode_handle *acpi_graph_get_next_endpoint(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
struct fwnode_handle *endpoint;
@@ -1040,14 +1050,15 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
* the child node on success, NULL otherwise.
*/
static struct fwnode_handle *acpi_graph_get_child_prop_value(
- struct fwnode_handle *fwnode, const char *prop_name, unsigned int val)
+ const struct fwnode_handle *fwnode, const char *prop_name,
+ unsigned int val)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+ if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
@@ -1069,17 +1080,18 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
* fields requested by the caller. Returns %0 in case of success and
* negative errno otherwise.
*/
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
struct fwnode_handle **parent,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
{
+ struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
struct acpi_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
- ret = acpi_node_get_property_reference(fwnode, "remote-endpoint", 0,
+ ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
return ret;
@@ -1121,7 +1133,7 @@ int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
+static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
return false;
@@ -1129,16 +1141,17 @@ static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
-static bool acpi_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !acpi_node_prop_get(fwnode, propname, NULL);
}
-static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
- const char *propname,
- unsigned int elem_size,
- void *val, size_t nval)
+static int
+acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
{
enum dev_prop_type type;
@@ -1162,16 +1175,17 @@ static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
return acpi_node_prop_read(fwnode, propname, type, val, nval);
}
-static int acpi_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval)
{
return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
val, nval);
}
static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct fwnode_handle *child;
@@ -1187,8 +1201,34 @@ acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
return NULL;
}
+static int
+acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct acpi_reference_args acpi_args;
+ unsigned int i;
+ int ret;
+
+ ret = __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, &acpi_args);
+ if (ret < 0)
+ return ret;
+ if (!args)
+ return 0;
+
+ args->nargs = acpi_args.nargs;
+ args->fwnode = acpi_fwnode_handle(acpi_args.adev);
+
+ for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
+ args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
+
+ return 0;
+}
+
static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
struct fwnode_handle *endpoint;
@@ -1201,7 +1241,7 @@ acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
}
static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint = NULL;
@@ -1210,7 +1250,13 @@ acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
return endpoint;
}
-static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+static struct fwnode_handle *
+acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
+{
+ return acpi_node_get_parent(fwnode);
+}
+
+static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode);
@@ -1223,16 +1269,27 @@ static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-const struct fwnode_operations acpi_fwnode_ops = {
- .device_is_available = acpi_fwnode_device_is_available,
- .property_present = acpi_fwnode_property_present,
- .property_read_int_array = acpi_fwnode_property_read_int_array,
- .property_read_string_array = acpi_fwnode_property_read_string_array,
- .get_parent = acpi_node_get_parent,
- .get_next_child_node = acpi_get_next_subnode,
- .get_named_child_node = acpi_fwnode_get_named_child_node,
- .graph_get_next_endpoint = acpi_fwnode_graph_get_next_endpoint,
- .graph_get_remote_endpoint = acpi_fwnode_graph_get_remote_endpoint,
- .graph_get_port_parent = acpi_node_get_parent,
- .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint,
-};
+#define DECLARE_ACPI_FWNODE_OPS(ops) \
+ const struct fwnode_operations ops = { \
+ .device_is_available = acpi_fwnode_device_is_available, \
+ .property_present = acpi_fwnode_property_present, \
+ .property_read_int_array = \
+ acpi_fwnode_property_read_int_array, \
+ .property_read_string_array = \
+ acpi_fwnode_property_read_string_array, \
+ .get_parent = acpi_node_get_parent, \
+ .get_next_child_node = acpi_get_next_subnode, \
+ .get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_reference_args = acpi_fwnode_get_reference_args, \
+ .graph_get_next_endpoint = \
+ acpi_fwnode_graph_get_next_endpoint, \
+ .graph_get_remote_endpoint = \
+ acpi_fwnode_graph_get_remote_endpoint, \
+ .graph_get_port_parent = acpi_fwnode_get_parent, \
+ .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
+ }; \
+ EXPORT_SYMBOL_GPL(ops)
+
+DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
+DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
+const struct fwnode_operations acpi_static_fwnode_ops;
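
With the fwnode type field gone, a node's identity is now derived from its ops pointer, which is why two separately exported (but identically populated) ops tables exist for device and data nodes, plus an empty acpi_static_fwnode_ops for static ACPI nodes. The matching type tests on the header side look roughly like this sketch (exact error-pointer handling may differ):

static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
	return !IS_ERR_OR_NULL(fwnode) &&
		fwnode->ops == &acpi_device_fwnode_ops;
}

static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
	return !IS_ERR_OR_NULL(fwnode) &&
		fwnode->ops == &acpi_data_fwnode_ops;
}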
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cd4c4271dc4c..d85e010ee2cc 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
return AE_OK;
}
+static int __acpi_dev_get_resources(struct acpi_device *adev,
+ struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data, char *method)
+{
+ struct res_proc_context c;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ if (!acpi_has_method(adev->handle, method))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, method,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+
/**
* acpi_dev_get_resources - Get current resources of a device.
* @adev: ACPI device node to get the resources for.
@@ -601,30 +630,45 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data)
{
- struct res_proc_context c;
- acpi_status status;
+ return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
+ METHOD_NAME__CRS);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
- if (!adev || !adev->handle || !list_empty(list))
- return -EINVAL;
+static int is_memory(struct acpi_resource *ares, void *not_used)
+{
+ struct resource_win win;
+ struct resource *res = &win.res;
- if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
- return 0;
+ memset(&win, 0, sizeof(win));
- c.list = list;
- c.preproc = preproc;
- c.preproc_data = preproc_data;
- c.count = 0;
- c.error = 0;
- status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
- acpi_dev_process_resource, &c);
- if (ACPI_FAILURE(status)) {
- acpi_dev_free_resource_list(list);
- return c.error ? c.error : -EIO;
- }
+ return !(acpi_dev_resource_memory(ares, res)
+ || acpi_dev_resource_address_space(ares, &win)
+ || acpi_dev_resource_ext_address_space(ares, &win));
+}
- return c.count;
+/**
+ * acpi_dev_get_dma_resources - Get current DMA resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * Evaluate the _DMA method for the given device node and process its
+ * output.
+ *
+ * The resultant struct resource objects are put on the list pointed to
+ * by @list, which must be empty initially, as members of struct
+ * resource_entry objects. Callers of this routine should use
+ * %acpi_dev_free_resource_list() to free that list.
+ *
+ * The number of resources in the output list is returned on success;
+ * otherwise, an error code reflecting the failure is returned.
+ */
+int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return __acpi_dev_get_resources(adev, list, is_memory, NULL,
+ METHOD_NAME__DMA);
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
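
A hedged sketch of a consumer of the new helper; the returned entries are struct resource_entry objects whose res and offset fields carry each DMA window and its translation offset (the probe function is hypothetical):

static int example_probe(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int count;

	count = acpi_dev_get_dma_resources(adev, &list);
	if (count <= 0)
		return count;	/* 0: no _DMA; < 0: evaluation error */

	list_for_each_entry(rentry, &list, node)
		dev_dbg(&adev->dev, "window %pR, offset %llu\n",
			rentry->res, (unsigned long long)rentry->offset);

	acpi_dev_free_resource_list(&list);
	return 0;
}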
/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13ad4bbb..a2428e9462dd 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -31,7 +31,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
-#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "sbshc.h"
#include "battery.h"
@@ -58,8 +58,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static bool sbs_manager_broken;
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -476,7 +474,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
@@ -632,31 +630,12 @@ static void acpi_sbs_callback(void *context)
}
}
-static int disable_sbs_manager(const struct dmi_system_id *d)
-{
- sbs_manager_broken = true;
- return 0;
-}
-
-static struct dmi_system_id acpi_sbs_dmi_table[] = {
- {
- .callback = disable_sbs_manager,
- .ident = "Apple",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
- },
- },
- { },
-};
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
- dmi_check_system(acpi_sbs_dmi_table);
-
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
@@ -677,7 +656,7 @@ static int acpi_sbs_add(struct acpi_device *device)
result = 0;
- if (!sbs_manager_broken) {
+ if (!x86_apple_machine) {
result = acpi_manager_get_info(sbs);
if (!result) {
sbs->manager_present = 1;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 70fd5502c284..602f8ff212f2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/nls.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/x86/apple.h>
#include <asm/pgtable.h>
@@ -1360,6 +1361,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
}
/**
+ * acpi_dma_get_range() - Get device DMA parameters.
+ *
+ * @dev: device to configure
+ * @dma_addr: pointer to the device DMA address result
+ * @offset: pointer to the DMA offset result
+ * @size: pointer to DMA range size result
+ *
+ * Evaluate the _DMA object and, on success, return the DMA region
+ * start, offset and size through @dma_addr, @offset and @size; the
+ * passed-in values are not updated on failure.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+ u64 *size)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(list);
+ struct resource_entry *rentry;
+ int ret;
+ struct device *dma_dev = dev;
+ u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+
+ /*
+ * Walk up the device tree looking for a device whose ACPI
+ * companion defines a _DMA object, and stop at the first
+ * match.
+ */
+ do {
+ adev = ACPI_COMPANION(dma_dev);
+ if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
+ break;
+
+ dma_dev = dma_dev->parent;
+ } while (dma_dev);
+
+ if (!dma_dev)
+ return -ENODEV;
+
+ if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
+ acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
+ return -EINVAL;
+ }
+
+ ret = acpi_dev_get_dma_resources(adev, &list);
+ if (ret > 0) {
+ list_for_each_entry(rentry, &list, node) {
+ if (dma_offset && rentry->offset != dma_offset) {
+ ret = -EINVAL;
+ dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ goto out;
+ }
+ dma_offset = rentry->offset;
+
+ /* Take lower and upper limits */
+ if (rentry->res->start < dma_start)
+ dma_start = rentry->res->start;
+ if (rentry->res->end > dma_end)
+ dma_end = rentry->res->end;
+ }
+
+ if (dma_start >= dma_end) {
+ ret = -EINVAL;
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
+ goto out;
+ }
+
+ *dma_addr = dma_start - dma_offset;
+ len = dma_end - dma_start;
+ *size = max(len, len + 1);
+ *offset = dma_offset;
+ }
+ out:
+ acpi_dev_free_resource_list(&list);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
@@ -1367,20 +1447,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
- u64 size;
+ u64 dma_addr = 0, size = 0;
- iort_set_dma_mask(dev);
+ iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
- /*
- * Assume dma valid range starts at 0 and covers the whole
- * coherent_dma_mask.
- */
- arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, dma_addr, size,
+ iommu, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -1452,6 +1528,12 @@ static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
+ /* Macs use device properties in lieu of _CRS resources */
+ if (x86_apple_machine &&
+ (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
+ fwnode_property_present(&device->fwnode, "i2cAddress")))
+ return true;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
@@ -1467,8 +1549,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->device_type = type;
device->handle = handle;
device->parent = acpi_bus_get_parent(handle);
- device->fwnode.type = FWNODE_ACPI;
- device->fwnode.ops = &acpi_fwnode_ops;
+ device->fwnode.ops = &acpi_device_fwnode_ops;
acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
@@ -2058,6 +2139,9 @@ int __init acpi_scan_init(void)
acpi_get_spcr_uart_addr();
}
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -2082,9 +2166,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_gpe_apply_masked_gpes();
- acpi_update_all_gpes();
-
acpi_scan_initialized = true;
out:
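
The max(len, len + 1) in acpi_dma_get_range() above is worth a worked example: for an ordinary window the inclusive size is len + 1, but a window spanning the whole 64-bit space has dma_start == 0 and dma_end == U64_MAX, so len + 1 wraps to 0 and max() falls back to the largest representable size:

u64 len = dma_end - dma_start;	/* inclusive span minus one byte */

/*
 * Ordinary window:  len + 1 > len, size is the exact byte count.
 * Full 64-bit map:  len == U64_MAX, len + 1 wraps to 0, and max()
 *                   keeps U64_MAX.
 */
u64 size = max(len, len + 1);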
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fa8243c5c062..6804ddab3052 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,7 +160,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
.ident = "Abit KN9 (nForce4 variant)",
@@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
#define ACPI_LPS0_SCREEN_OFF 3
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
@@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
+/* Device constraint entry structure */
+struct lpi_device_info {
+ char *name;
+ int enabled;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint {
+ int uid;
+ int min_dstate;
+ int function_states;
+};
+
+struct lpi_constraints {
+ acpi_handle handle;
+ int min_dstate;
+};
+
+static struct lpi_constraints *lpi_constraints_table;
+static int lpi_constraints_table_size;
+
+static void lpi_device_get_constraints(void)
+{
+ union acpi_object *out_obj;
+ int i;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
+ if (!out_obj)
+ return;
+
+ lpi_constraints_table = kcalloc(out_obj->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ struct lpi_constraints *constraint;
+ acpi_status status;
+ union acpi_object *package = &out_obj->package.elements[i];
+ struct lpi_device_info info = { };
+ int package_count = 0, j;
+
+ if (!package)
+ continue;
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *element =
+ &(package->package.elements[j]);
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ info.enabled = element->integer.value;
+ break;
+ case ACPI_TYPE_STRING:
+ info.name = element->string.pointer;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ package_count = element->package.count;
+ info.package = element->package.elements;
+ break;
+ }
+ }
+
+ if (!info.enabled || !info.package || !info.name)
+ continue;
+
+ constraint = &lpi_constraints_table[lpi_constraints_table_size];
+
+ status = acpi_get_handle(NULL, info.name, &constraint->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "index:%d Name:%s\n", i, info.name);
+
+ constraint->min_dstate = -1;
+
+ for (j = 0; j < package_count; ++j) {
+ union acpi_object *info_obj = &info.package[j];
+ union acpi_object *cnstr_pkg;
+ union acpi_object *obj;
+ struct lpi_device_constraint dev_info;
+
+ switch (info_obj->type) {
+ case ACPI_TYPE_INTEGER:
+ /* version */
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (info_obj->package.count < 2)
+ break;
+
+ cnstr_pkg = info_obj->package.elements;
+ obj = &cnstr_pkg[0];
+ dev_info.uid = obj->integer.value;
+ obj = &cnstr_pkg[1];
+ dev_info.min_dstate = obj->integer.value;
+
+ acpi_handle_debug(lps0_device_handle,
+ "uid:%d min_dstate:%s\n",
+ dev_info.uid,
+ acpi_power_state_string(dev_info.min_dstate));
+
+ constraint->min_dstate = dev_info.min_dstate;
+ break;
+ }
+ }
+
+ if (constraint->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+
+ lpi_constraints_table_size++;
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_check_constraints(void)
+{
+ int i;
+
+ for (i = 0; i < lpi_constraints_table_size; ++i) {
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
+ continue;
+
+ acpi_handle_debug(adev->handle,
+ "LPI: required min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+
+ if (!adev->flags.power_manageable) {
+ acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
+ continue;
+ }
+
+ if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ acpi_handle_info(adev->handle,
+ "LPI: Constraint not met; min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+ }
+}
+
static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
union acpi_object *out_obj;
@@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
}
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
@@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
"_DSM function 0 evaluation failed\n");
}
ACPI_FREE(out_obj);
+
+ lpi_device_get_constraints();
+
return 0;
}
@@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-static int acpi_freeze_begin(void)
+static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
s2idle_in_progress = true;
return 0;
}
-static int acpi_freeze_prepare(void)
+static int acpi_s2idle_prepare(void)
{
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
@@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
return 0;
}
-static void acpi_freeze_wake(void)
+static void acpi_s2idle_wake(void)
{
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
/*
* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
* that the SCI has triggered while suspended, so cancel the wakeup in
@@ -772,7 +946,7 @@ static void acpi_freeze_wake(void)
}
}
-static void acpi_freeze_sync(void)
+static void acpi_s2idle_sync(void)
{
/*
* Process all pending events in case there are any wakeup ones.
@@ -785,7 +959,7 @@ static void acpi_freeze_sync(void)
s2idle_wakeup = false;
}
-static void acpi_freeze_restore(void)
+static void acpi_s2idle_restore(void)
{
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
@@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
}
}
-static void acpi_freeze_end(void)
+static void acpi_s2idle_end(void)
{
s2idle_in_progress = false;
acpi_scan_lock_release();
}
-static const struct platform_freeze_ops acpi_freeze_ops = {
- .begin = acpi_freeze_begin,
- .prepare = acpi_freeze_prepare,
- .wake = acpi_freeze_wake,
- .sync = acpi_freeze_sync,
- .restore = acpi_freeze_restore,
- .end = acpi_freeze_end,
+static const struct platform_s2idle_ops acpi_s2idle_ops = {
+ .begin = acpi_s2idle_begin,
+ .prepare = acpi_s2idle_prepare,
+ .wake = acpi_s2idle_wake,
+ .sync = acpi_s2idle_sync,
+ .restore = acpi_s2idle_restore,
+ .end = acpi_s2idle_end,
};
static void acpi_sleep_suspend_setup(void)
@@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_scan_add_handler(&lps0_handler);
- freeze_set_ops(&acpi_freeze_ops);
+ s2idle_set_ops(&acpi_s2idle_ops);
}
#else /* !CONFIG_SUSPEND */
@@ -870,7 +1044,7 @@ static struct syscore_ops acpi_sleep_syscore_ops = {
.resume = acpi_restore_bm_rld,
};
-void acpi_sleep_syscore_init(void)
+static void acpi_sleep_syscore_init(void)
{
register_syscore_ops(&acpi_sleep_syscore_ops);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 98aa8c808a33..324b35bfe781 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -53,17 +53,24 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
*/
static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
{
+ bool xgene_8250 = false;
+
if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE)
return false;
- if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE))
+ if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) &&
+ memcmp(tb->header.oem_id, "HPE ", ACPI_OEM_ID_SIZE))
return false;
if (!memcmp(tb->header.oem_table_id, "XGENESPC",
ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0)
- return true;
+ xgene_8250 = true;
- return false;
+ if (!memcmp(tb->header.oem_table_id, "ProLiant",
+ ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1)
+ xgene_8250 = true;
+
+ return xgene_8250;
}
/**
@@ -105,16 +112,17 @@ int __init parse_spcr(bool earlycon)
}
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (table->serial_port.access_width) {
+ switch (ACPI_ACCESS_BIT_WIDTH(table->serial_port.access_width)) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
- case ACPI_ACCESS_SIZE_BYTE:
+ case 8:
iotype = "mmio";
break;
- case ACPI_ACCESS_SIZE_WORD:
+ case 16:
iotype = "mmio16";
break;
- case ACPI_ACCESS_SIZE_DWORD:
+ case 32:
iotype = "mmio32";
break;
}
@@ -181,11 +189,19 @@ int __init parse_spcr(bool earlycon)
uart = "qdf2400_e44";
}
- if (xgene_8250_erratum_present(table))
+ if (xgene_8250_erratum_present(table)) {
iotype = "mmio32";
- snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
- table->serial_port.address, baud_rate);
+ /*
+ * For xgene v1 and v2 we don't know the clock rate of the
+ * UART, so don't attempt to set the baud rate stated in the
+ * table: the driver cannot calculate the dividers.
+ */
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
+ table->serial_port.address);
+ } else {
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
+ table->serial_port.address, baud_rate);
+ }
pr_info("console: %s\n", opts);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index e414fabf7315..78a5a23010ab 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -2,6 +2,8 @@
* sysfs.c - ACPI sysfs interface to userspace.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -306,11 +308,13 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
+ * /sys/firmware/acpi/tables/data/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
+static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;
@@ -325,6 +329,11 @@ struct acpi_table_attr {
struct list_head node;
};
+struct acpi_data_attr {
+ struct bin_attribute attr;
+ u64 addr;
+};
+
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
@@ -420,6 +429,70 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
return AE_OK;
}
+static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct acpi_data_attr *data_attr;
+ void __iomem *base;
+ ssize_t rc;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+
+ base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+ if (!base)
+ return -ENOMEM;
+ rc = memory_read_from_buffer(buf, count, &offset, base,
+ data_attr->attr.size);
+ acpi_os_unmap_memory(base, data_attr->attr.size);
+
+ return rc;
+}
+
+static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+{
+ struct acpi_table_bert *bert = th;
+
+ if (bert->header.length < sizeof(struct acpi_table_bert) ||
+ bert->region_length < sizeof(struct acpi_hest_generic_status)) {
+ kfree(data_attr);
+ return -EINVAL;
+ }
+ data_attr->addr = bert->address;
+ data_attr->attr.size = bert->region_length;
+ data_attr->attr.attr.name = "BERT";
+
+ return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
+}
+
+static struct acpi_data_obj {
+ char *name;
+ int (*fn)(void *, struct acpi_data_attr *);
+} acpi_data_objs[] = {
+ { ACPI_SIG_BERT, acpi_bert_data_init },
+};
+
+#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
+
+static int acpi_table_data_init(struct acpi_table_header *th)
+{
+ struct acpi_data_attr *data_attr;
+ int i;
+
+ for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
+ if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
+ if (!data_attr)
+ return -ENOMEM;
+ sysfs_attr_init(&data_attr->attr.attr);
+ data_attr->attr.read = acpi_data_show;
+ data_attr->attr.attr.mode = 0400;
+ return acpi_data_objs[i].fn(th, data_attr);
+ }
+ }
+ return 0;
+}
+
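The net effect is a root-readable (mode 0400) binary node under /sys/firmware/acpi/tables/data/. A hypothetical userspace consumer, assuming the firmware actually provides a BERT table:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/firmware/acpi/tables/data/BERT", O_RDONLY);

		if (fd < 0)
			return 1;	/* needs root and a BERT table */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* raw boot error region */
		close(fd);
		return 0;
	}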
static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
@@ -432,6 +505,10 @@ static int acpi_tables_sysfs_init(void)
if (!tables_kobj)
goto err;
+ tables_data_kobj = kobject_create_and_add("data", tables_kobj);
+ if (!tables_data_kobj)
+ goto err_tables_data;
+
dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
@@ -456,13 +533,17 @@ static int acpi_tables_sysfs_init(void)
return ret;
}
list_add_tail(&table_attr->node, &acpi_table_attr_list);
+ acpi_table_data_init(table_header);
}
kobject_uevent(tables_kobj, KOBJ_ADD);
+ kobject_uevent(tables_data_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
return 0;
err_dynamic_tables:
+ kobject_put(tables_data_kobj);
+err_tables_data:
kobject_put(tables_kobj);
err:
return -ENOMEM;
@@ -552,11 +633,15 @@ static void fixed_event_count(u32 event_number)
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
- if (event_type == ACPI_EVENT_TYPE_GPE)
+ if (event_type == ACPI_EVENT_TYPE_GPE) {
gpe_count(event_number);
-
- if (event_type == ACPI_EVENT_TYPE_FIXED)
+ pr_debug("GPE event 0x%02x\n", event_number);
+ } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
fixed_event_count(event_number);
+ pr_debug("Fixed event 0x%02x\n", event_number);
+ } else {
+ pr_debug("Other event 0x%02x\n", event_number);
+ }
}
static int get_status(u32 index, acpi_event_status *status,
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ff425390bfa8..80ce2a7d224b 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -740,10 +740,10 @@ int __init acpi_table_init(void)
if (acpi_verify_table_checksum) {
pr_info("Early table checksum verification enabled\n");
- acpi_gbl_verify_table_checksum = TRUE;
+ acpi_gbl_enable_table_validation = TRUE;
} else {
pr_info("Early table checksum verification disabled\n");
- acpi_gbl_verify_table_checksum = FALSE;
+ acpi_gbl_enable_table_validation = FALSE;
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 1d0417b87cb7..551b71a24b85 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
return 0;
}
-static struct dmi_system_id thermal_dmi_table[] __initdata = {
+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
/*
* Award BIOS on this AOpen makes thermal control almost worthless.
* http://bugzilla.kernel.org/show_bug.cgi?id=8842
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index b9d956c916f5..0a9e5979aaa9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -816,3 +816,39 @@ static int __init acpi_backlight(char *str)
return 1;
}
__setup("acpi_backlight=", acpi_backlight);
+
+/**
+ * acpi_match_platform_list - Check if the system matches with a given list
+ * @plat: pointer to acpi_platform_list table terminated by a NULL entry
+ *
+ * Return the matched index if the system is found in the platform list.
+ * Otherwise, return a negative error code.
+ */
+int acpi_match_platform_list(const struct acpi_platform_list *plat)
+{
+ struct acpi_table_header hdr;
+ int idx = 0;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ for (; plat->oem_id[0]; plat++, idx++) {
+ if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr)))
+ continue;
+
+ if (strncmp(plat->oem_id, hdr.oem_id, ACPI_OEM_ID_SIZE))
+ continue;
+
+ if (strncmp(plat->oem_table_id, hdr.oem_table_id, ACPI_OEM_TABLE_ID_SIZE))
+ continue;
+
+ if ((plat->pred == all_versions) ||
+ (plat->pred == less_than_or_equal && hdr.oem_revision <= plat->oem_revision) ||
+ (plat->pred == greater_than_or_equal && hdr.oem_revision >= plat->oem_revision) ||
+ (plat->pred == equal && hdr.oem_revision == plat->oem_revision))
+ return idx;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_match_platform_list);
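A usage sketch for the new helper. The struct acpi_platform_list field names are inferred from the lookup loop above, and the vendor strings are invented for illustration:

	/* Hypothetical caller: act only on old firmware from one vendor. */
	static const struct acpi_platform_list example_plat[] __initconst = {
		{ .oem_id	= "VENDOR",	/* invented OEM ID */
		  .oem_table_id	= "TABLE123",	/* invented table ID */
		  .oem_revision	= 3,
		  .table	= ACPI_SIG_FADT,
		  .pred		= less_than_or_equal },
		{ }	/* list is terminated by an empty oem_id */
	};

	static int __init example_init(void)
	{
		int idx = acpi_match_platform_list(example_plat);

		if (idx < 0)
			return -ENODEV;	/* no entry matched */
		pr_info("matched platform list entry %d\n", idx);
		return 0;
	}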
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d179e8d9177d..601e5d372887 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -103,6 +103,12 @@ static int video_detect_force_native(const struct dmi_system_id *d)
return 0;
}
+static int video_detect_force_none(const struct dmi_system_id *d)
+{
+ acpi_backlight_dmi = acpi_backlight_none;
+ return 0;
+}
+
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
@@ -313,6 +319,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "Dell OptiPlex 9020M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
new file mode 100644
index 000000000000..51b4cf9f25da
--- /dev/null
+++ b/drivers/acpi/x86/apple.c
@@ -0,0 +1,141 @@
+/*
+ * apple.c - Apple ACPI quirks
+ * Copyright (C) 2017 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/uuid.h>
+
+/* Apple _DSM device properties GUID */
+static const guid_t apple_prp_guid =
+ GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c,
+ 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b);
+
+/**
+ * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties
+ * @adev: ACPI device for which to retrieve the properties
+ *
+ * Invoke Apple's custom _DSM once to check the protocol version and once more
+ * to retrieve the properties. They are marshalled up in a single package as
+ * alternating key/value elements, unlike _DSD which stores them as a package
+ * of 2-element packages. Convert to _DSD format and make them available under
+ * the primary fwnode.
+ */
+void acpi_extract_apple_properties(struct acpi_device *adev)
+{
+ unsigned int i, j = 0, newsize = 0, numprops, numvalid;
+ union acpi_object *props, *newprops;
+ unsigned long *valid = NULL;
+ void *free_space;
+
+ if (!x86_apple_machine)
+ return;
+
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0,
+ NULL, ACPI_TYPE_BUFFER);
+ if (!props)
+ return;
+
+ if (!props->buffer.length)
+ goto out_free;
+
+ if (props->buffer.pointer[0] != 3) {
+ acpi_handle_info(adev->handle, FW_INFO
+ "unsupported properties version %*ph\n",
+ props->buffer.length, props->buffer.pointer);
+ goto out_free;
+ }
+
+ ACPI_FREE(props);
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!props)
+ return;
+
+ numprops = props->package.count / 2;
+ if (!numprops)
+ goto out_free;
+
+ valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL);
+ if (!valid)
+ goto out_free;
+
+ /* newsize = key length + value length of each tuple */
+ for (i = 0; i < numprops; i++) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+
+ if (key->type != ACPI_TYPE_STRING ||
+ (val->type != ACPI_TYPE_INTEGER &&
+ val->type != ACPI_TYPE_BUFFER))
+ continue; /* skip invalid properties */
+
+ __set_bit(i, valid);
+ newsize += key->string.length + 1;
+ if (val->type == ACPI_TYPE_BUFFER)
+ newsize += val->buffer.length;
+ }
+
+ numvalid = bitmap_weight(valid, numprops);
+ if (numprops > numvalid)
+ acpi_handle_info(adev->handle, FW_INFO
+ "skipped %u properties: wrong type\n",
+ numprops - numvalid);
+ if (numvalid == 0)
+ goto out_free;
+
+ /* newsize += top-level package + 3 objects for each key/value tuple */
+ newsize += (1 + 3 * numvalid) * sizeof(union acpi_object);
+ newprops = ACPI_ALLOCATE_ZEROED(newsize);
+ if (!newprops)
+ goto out_free;
+
+ /* layout: top-level package | packages | key/value tuples | strings */
+ newprops->type = ACPI_TYPE_PACKAGE;
+ newprops->package.count = numvalid;
+ newprops->package.elements = &newprops[1];
+ free_space = &newprops[1 + 3 * numvalid];
+
+ for_each_set_bit(i, valid, numprops) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+ unsigned int k = 1 + numvalid + j * 2; /* index into newprops */
+ unsigned int v = k + 1;
+
+ newprops[1 + j].type = ACPI_TYPE_PACKAGE;
+ newprops[1 + j].package.count = 2;
+ newprops[1 + j].package.elements = &newprops[k];
+
+ newprops[k].type = ACPI_TYPE_STRING;
+ newprops[k].string.length = key->string.length;
+ newprops[k].string.pointer = free_space;
+ memcpy(free_space, key->string.pointer, key->string.length);
+ free_space += key->string.length + 1;
+
+ newprops[v].type = val->type;
+ if (val->type == ACPI_TYPE_INTEGER) {
+ newprops[v].integer.value = val->integer.value;
+ } else {
+ newprops[v].buffer.length = val->buffer.length;
+ newprops[v].buffer.pointer = free_space;
+ memcpy(free_space, val->buffer.pointer,
+ val->buffer.length);
+ free_space += val->buffer.length;
+ }
+ j++; /* count valid properties */
+ }
+ WARN_ON(free_space != (void *)newprops + newsize);
+
+ adev->data.properties = newprops;
+ adev->data.pointer = newprops;
+
+out_free:
+ ACPI_FREE(props);
+ kfree(valid);
+}
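Once converted to the _DSD layout, the properties should be reachable through the generic fwnode property API. A hedged consumer sketch (the key name is invented for illustration):

	#include <linux/acpi.h>
	#include <linux/property.h>

	static int example_read_apple_prop(struct acpi_device *adev)
	{
		u64 val;

		/* "example-key" is a hypothetical property name */
		return fwnode_property_read_u64(acpi_fwnode_handle(adev),
						"example-key", &val);
	}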
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 832e885349b1..9801d852bd56 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder,hwbinder"
+ default "binder,hwbinder,vndbinder"
---help---
Default value for the binder.devices parameter.
@@ -32,7 +32,7 @@ config ANDROID_BINDER_DEVICES
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_32BIT
- bool
+ bool "Use old (Android 4.4 and earlier) 32-bit binder API"
depends on !64BIT && ANDROID_BINDER_IPC
default y
---help---
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows the binder selftest to run.
+
+ The binder selftest exhaustively exercises the allocation and
+ freeing of binder buffers across combinations of various buffer
+ sizes and alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..d055b3f2a207 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires proc->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
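To make the documented ordering concrete, a minimal sketch of a correctly nested acquire/release sequence using the helpers introduced later in this patch (illustrative only; proc and node are assumed valid):

	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */
	/* ... mutate refs, node state and todo lists here ... */
	binder_inner_proc_unlock(proc);	/* release in reverse order */
	binder_node_unlock(node);
	binder_proc_unlock(proc);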
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -35,30 +68,31 @@
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -88,8 +122,6 @@ BINDER_DEBUG_ENTRY(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
@@ -104,17 +136,13 @@ enum {
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -171,26 +199,27 @@ enum binder_stat_types {
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -200,11 +229,14 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -214,19 +246,26 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+ * the other fields are zeroed by the memset() below.
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
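The log is now a lock-free ring: writers claim a slot with atomic_inc_return() and publish through the write barrier above. A reader-side sketch (assumed, modeled on the driver's seq_file dump, which must pair the smp_wmb() with an smp_rmb()):

	static void print_log_entry_sketch(struct seq_file *m,
					   struct binder_transaction_log_entry *e)
	{
		int debug_id = READ_ONCE(e->debug_id_done);

		/*
		 * Pairs with smp_wmb() in binder_transaction_log_add():
		 * debug_id_done is read before the payload fields below.
		 */
		smp_rmb();
		seq_printf(m, "%d: call from %d:%d\n",
			   e->debug_id, e->from_proc, e->from_thread);
		/* zero or changed debug_id_done => entry was in flight */
		if (!debug_id || debug_id != READ_ONCE(e->debug_id_done))
			seq_puts(m, " (incomplete)\n");
	}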
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -237,11 +276,20 @@ struct binder_device {
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -249,8 +297,72 @@ struct binder_work {
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -261,88 +373,167 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid: PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files: files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notifications
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In the current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
-
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
+ bool is_dead;
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -350,10 +541,13 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
+ int tmp_ref;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -362,22 +556,58 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
};
struct binder_transaction {
@@ -397,10 +627,253 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
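binder_proc_lock()/binder_proc_unlock() and the helpers that follow all share one pattern: a macro forwards __LINE__ into an inner function so BINDER_DEBUG_SPINLOCKS output can attribute every acquire and release to its call site. Generically (a sketch, not part of the patch):

	#define my_lock(l)	_my_lock(l, __LINE__)
	static void _my_lock(spinlock_t *l, int line)
	{
		pr_debug("%s: line=%d\n", __func__, line);	/* call-site tag */
		spin_lock(l);
	}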
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to release
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to release
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer to the dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
@@ -451,462 +924,159 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
-static inline void binder_lock(const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
- long min_nice;
-
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
- return;
- }
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
+ bool has_work;
- BUG_ON(!new_buffer->free);
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return has_work;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
+ struct rb_node *n;
+ struct binder_thread *thread;
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
}
- return NULL;
}
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(proc->tsk);
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
+ struct binder_thread *thread;
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ assert_spin_locked(&proc->inner_lock);
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return 0;
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return -ENOMEM;
+ return thread;
}
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
+ assert_spin_locked(&proc->inner_lock);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
-
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ wake_up_interruptible(&thread->wait);
+ return;
}
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
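/*
 * [Editor's illustration, not part of this patch] A minimal userspace
 * sketch of the wake-up policy above, using invented names (struct pool,
 * struct worker, pool_wakeup_locked): signal the selected waiter if we
 * have one, otherwise signal every thread, since we cannot tell which
 * pollers are idle.
 */
#include <pthread.h>

struct worker {
	pthread_cond_t wait;		/* stands in for thread->wait */
};

struct pool {
	pthread_mutex_t lock;		/* stands in for proc->inner_lock */
	struct worker *workers;
	int nworkers;
};

/* Caller holds pool->lock, mirroring the _ilocked naming convention. */
static void pool_wakeup_locked(struct pool *p, struct worker *w)
{
	if (w) {
		pthread_cond_signal(&w->wait);
		return;
	}
	/* No selected waiter: wake everyone, as with the (e)poll case. */
	for (int i = 0; i < p->nworkers; i++)
		pthread_cond_signal(&p->workers[i].wait);
}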
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_set_nice(long nice)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
+ long min_nice;
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
}
- binder_insert_free_buffer(proc, buffer);
+ min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: nice value %ld not allowed use %ld instead\n",
+ current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice <= MAX_NICE)
+ return;
+ binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
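/*
 * [Editor's illustration, not part of this patch] Userspace analog of the
 * capping above, assuming the standard encoding that RLIMIT_NICE stores
 * the nice ceiling as 20 - rlim_cur (what rlimit_to_nice() undoes in the
 * kernel). set_nice_capped() is an invented helper name.
 */
#include <sys/resource.h>

static int set_nice_capped(int want)
{
	struct rlimit rl;
	int min_nice;

	if (getrlimit(RLIMIT_NICE, &rl))
		return -1;
	min_nice = 20 - (int)rl.rlim_cur;	/* lowest nice we may request */
	if (want < min_nice)
		want = min_nice;		/* cap, as binder_set_nice() does */
	return setpriority(PRIO_PROCESS, 0, want);
}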
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -914,21 +1084,46 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure the node stays alive
+ * until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+
+ assert_spin_locked(&proc->inner_lock);
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -936,33 +1131,74 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
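/*
 * [Editor's illustration, not part of this patch] The pattern used by
 * binder_new_node() above, reduced to a pthread mutex and a keyed list:
 * allocate while no lock is held (kzalloc with GFP_KERNEL may sleep),
 * insert under the lock, and free our copy if another thread won the
 * race. All names here are invented for the sketch.
 */
#include <pthread.h>
#include <stdlib.h>

struct item { long key; struct item *next; };

static struct item *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct item *get_or_create(long key)
{
	struct item *fresh = calloc(1, sizeof(*fresh));	/* no lock held yet */
	struct item *it;

	if (!fresh)
		return NULL;
	fresh->key = key;
	pthread_mutex_lock(&list_lock);
	for (it = head; it; it = it->next)
		if (it->key == key)
			break;			/* another thread won the race */
	if (!it) {
		fresh->next = head;
		head = it = fresh;
	}
	pthread_mutex_unlock(&list_lock);
	if (it != fresh)
		free(fresh);			/* cf. the kfree(new_node) above */
	return it;
}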
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -978,8 +1214,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -990,58 +1226,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}
- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take a reference on the node to prevent it from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list
+ * to print nodes).
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node_nilocked() to check if all refcounts
+ * are 0 and cleanup is needed. Calling with strong=0 and
+ * internal=1 causes no actual reference to be released in
+ * binder_dec_node_nilocked(). If that changes, a change is
+ * needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
}
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
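/*
 * [Editor's illustration, not part of this patch] The tmp_refs idea in
 * miniature: pin an object while a local pointer refers to it, and let
 * the last "put" free it. stdatomic stands in for the lock-protected
 * counter; struct obj and its helpers are invented names.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int tmp_refs;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->tmp_refs, 1);	/* cf. binder_inc_node_tmpref() */
	return o;
}

static void obj_put(struct obj *o)
{
	/* the last user performs the free, cf. binder_free_node() */
	if (atomic_fetch_sub(&o->tmp_refs, 1) == 1)
		free(o);
}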
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1049,11 +1396,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1063,14 +1410,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize its fields and insert it into the given proc's
+ * rb trees and the node's refs list.
+ *
+ * Return: the ref for @node. Another thread may have
+ * allocated/initialized the ref first, in which case the
+ * returned ref differs from the passed-in new_ref and
+ * new_ref must be kfree'd by the caller.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1083,22 +1450,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1106,121 +1473,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
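/*
 * [Editor's illustration, not part of this patch] The descriptor choice
 * above walks refs_by_desc in ascending order and stops at the first
 * hole. The same scan over a sorted array; lowest_free_desc() is an
 * invented name.
 */
static unsigned int lowest_free_desc(const unsigned int *sorted, int n)
{
	unsigned int desc = 1;	/* desc 0 is reserved for the context manager */

	for (int i = 0; i < n; i++) {
		if (sorted[i] > desc)
			break;			/* found an unused descriptor */
		desc = sorted[i] + 1;
	}
	return desc;
}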
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
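/*
 * [Editor's illustration, not part of this patch] The strong/weak
 * accounting above in miniature: an invalid decrement is reported to
 * the caller, and the ref is considered dead only when both counts
 * reach zero, leaving the actual free to the caller just as
 * binder_dec_ref_olocked() leaves it to binder_free_ref().
 * struct refcounts and ref_dec() are invented names.
 */
struct refcounts {
	int strong;
	int weak;
};

/* Returns 1 when the ref is dead and the caller must free it. */
static int ref_dec(struct refcounts *r, int strong)
{
	int *count = strong ? &r->strong : &r->weak;

	if (*count == 0)
		return 0;	/* invalid decrement, cf. binder_user_error() */
	(*count)--;
	return r->strong == 0 && r->weak == 0;
}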
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node, or NULL if the ref was not found or if a
+ * strong reference was required but only a weak one is held
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to the @increment argument.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
+ }
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free the thread if appropriate (the thread has
+ * been released and no transaction is being processed by the
+ * driver).
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * the counter is atomic so it can be incremented without
+ * holding the inner lock (see binder_get_txn_from()); the
+ * inner lock is taken here so that the is_dead check and
+ * the zero-count check are consistent with thread release
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (the proc has been released, all threads
+ * have been released, and it is not currently in use to process
+ * a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
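/*
 * [Editor's illustration, not part of this patch] Reading a pointer that
 * a peer may clear, and pinning its target, must happen in one locked
 * section -- the shape of binder_get_txn_from(). struct txn, struct peer
 * and txn_get_from() are invented names.
 */
#include <pthread.h>
#include <stdatomic.h>

struct peer {
	atomic_int tmp_ref;
};

struct txn {
	pthread_mutex_t lock;		/* stands in for t->lock */
	struct peer *from;		/* may be cleared concurrently */
};

static struct peer *txn_get_from(struct txn *t)
{
	struct peer *p;

	pthread_mutex_lock(&t->lock);
	p = t->from;
	if (p)
		atomic_fetch_add(&p->tmp_ref, 1); /* pin before dropping lock */
	pthread_mutex_unlock(&t->lock);
	return p;
}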
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
}
- t->need_reply = 0;
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1235,30 +1904,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
-
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1267,7 +1934,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1476,24 +2143,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1532,7 +2201,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1564,102 +2234,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1750,7 +2440,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,12 +2509,80 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * todo list.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread)
+ target_list = &thread->todo;
+ else if (!target_list)
+ target_list = &proc->todo;
+ else
+ BUG_ON(target_list != &node->async_todo);
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
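/*
 * [Editor's illustration, not part of this patch] The queue selection in
 * binder_proc_transaction() as a pure decision function; the enum and
 * pick_queue() are invented for the sketch. A oneway transaction behind
 * pending async work waits on the node; otherwise a found or supplied
 * thread takes it; otherwise any thread may pick it up from the proc.
 */
enum work_queue { THREAD_TODO, PROC_TODO, NODE_ASYNC_TODO };

static enum work_queue pick_queue(int oneway, int node_has_async,
				  int have_thread)
{
	if (oneway && node_has_async)
		return NODE_ASYNC_TODO;	/* no wakeup; drained later */
	if (have_thread)
		return THREAD_TODO;
	return PROC_TODO;
}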
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1835,19 +2594,21 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1857,29 +2618,40 @@ static void binder_transaction(struct binder_proc *proc,
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ binder_set_nice(in_reply_to->saved_priority);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1888,89 +2660,137 @@ static void binder_transaction(struct binder_proc *proc,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2004,11 +2824,18 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2016,9 +2843,6 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
@@ -2028,6 +2852,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -2035,12 +2861,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2048,6 +2878,8 @@ static void binder_transaction(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2064,6 +2896,8 @@ static void binder_transaction(struct binder_proc *proc,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2078,6 +2912,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2089,6 +2925,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2100,6 +2938,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2116,6 +2956,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2125,12 +2967,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2146,6 +2992,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (copy_from_user(sg_bufp,
@@ -2153,12 +3001,15 @@ static void binder_transaction(struct binder_proc *proc,
bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2167,6 +3018,8 @@ static void binder_transaction(struct binder_proc *proc,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2176,38 +3029,60 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
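+		/*
+		 * The transaction now sits on this thread's stack; if the
+		 * target died before it could be queued, pop it back off
+		 * and report a dead reply.
+		 */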
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
- }
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait) {
- if (reply || !(t->flags & TF_ONE_WAY))
- wake_up_interruptible_sync(target_wait);
- else
- wake_up_interruptible(target_wait);
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2215,8 +3090,9 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2229,24 +3105,49 @@ err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2260,15 +3161,17 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2276,53 +3179,61 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2330,6 +3241,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -2354,13 +3266,17 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2369,16 +3285,23 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2396,7 +3319,8 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2418,15 +3342,27 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
- else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
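+			/*
+			 * Freeing an async buffer makes room for the next
+			 * queued async transaction on this node, if any:
+			 * move it to the process work list and wake a
+			 * thread to handle it.
+			 */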
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = 0;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2457,6 +3393,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2470,6 +3407,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2494,7 +3432,7 @@ static int binder_thread_write(struct binder_proc *proc,
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2502,7 +3440,29 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
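+			/* Lock order: proc outer lock, then ref->node lock */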
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2510,6 +3470,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2519,21 +3481,18 @@ static int binder_thread_write(struct binder_proc *proc,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc(sizeof(*death), GFP_KERNEL);
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2542,17 +3501,19 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
- }
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2561,22 +3522,35 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
@@ -2587,8 +3561,13 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2602,19 +3581,25 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
+ binder_inner_proc_unlock(proc);
} break;
default:
@@ -2632,23 +3617,79 @@ static void binder_stat_br(struct binder_proc *proc,
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
}
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
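+/*
+ * Sleep until this thread has work (or, when do_proc_work is true,
+ * until the process has work).  The thread is parked on
+ * proc->waiting_threads while it sleeps so that a waker can find it;
+ * returns -ERESTARTSYS if a signal is pending.
+ */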
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2670,37 +3711,15 @@ static int binder_thread_read(struct binder_proc *proc,
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2710,23 +3729,15 @@ retry:
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2735,31 +3746,52 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+			ptr += sizeof(uint32_t);
+
+			binder_stat_br(proc, thread, e->cmd);
+			e->cmd = BR_OK;
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2769,113 +3801,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
+
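+			/*
+			 * Snapshot the node state under the inner lock and
+			 * update the pending-ref bookkeeping, then drop the
+			 * lock before pushing BR_* commands to user space
+			 * via binder_put_node_cmd().
+			 */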
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
-
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
+
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2907,8 +3960,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2918,18 +3972,24 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -2939,21 +3999,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -2961,29 +4022,36 @@ retry:
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* user space fails to spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -2996,11 +4064,17 @@ static void binder_release_work(struct list_head *list)
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3027,7 +4101,8 @@ static void binder_release_work(struct list_head *list)
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3042,38 +4117,99 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+ return thread;
+}
+
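+/*
+ * Look up the calling thread; on a miss, allocate a new binder_thread
+ * outside the lock and retry the insert, freeing the allocation if it
+ * lost the race.
+ */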
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
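+/* Final teardown of a proc: all queued work must already be drained */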
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
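+	/*
+	 * Walk this thread's transaction stack, holding each
+	 * transaction's lock while it is examined so that its
+	 * from/to fields cannot change underneath us.
+	 */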
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3094,12 +4230,16 @@ static int binder_free_thread(struct binder_proc *proc,
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3108,30 +4248,24 @@ static unsigned int binder_poll(struct file *filp,
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;
	thread = binder_get_thread(proc);
+	if (!thread)
+		return POLLERR;
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
+
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
+
+ poll_wait(filp, &thread->wait, wait);
+
+	if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}
@@ -3178,8 +4312,10 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -3204,9 +4340,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3227,19 +4364,49 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
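+/*
+ * Fill in debug info for the first node whose ptr is greater than
+ * info->ptr; user space can iterate proc->nodes by repeating the
+ * ioctl with the ptr returned by the previous call.
+ */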
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info)
+{
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3251,13 +4418,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3270,12 +4438,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3284,7 +4459,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3301,6 +4476,24 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -3308,8 +4501,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3338,8 +4530,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3357,20 +4548,18 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3380,73 +4569,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
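+	/* Buffer setup and accounting now live in binder_alloc */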
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3464,24 +4595,26 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3517,16 +4650,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3547,13 +4681,21 @@ static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+	 * The caller must have taken a temporary ref on the node.
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3561,45 +4703,58 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3607,15 +4762,25 @@ static void binder_deferred_release(struct binder_proc *proc)
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3625,73 +4790,42 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3702,7 +4836,6 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -3729,7 +4862,6 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -3749,41 +4881,51 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3791,8 +4933,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3817,40 +4967,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3860,27 +5016,34 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3890,36 +5053,60 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
- }
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -3985,17 +5172,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4003,11 +5194,15 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4015,51 +5210,61 @@ static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4070,57 +5275,67 @@ static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
-
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4128,44 +5343,63 @@ static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
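+/*
+ * Editor's sketch (illustrative, not part of this patch): the cursor
+ * math above turns the atomic counter into a ring-buffer walk. For a
+ * 32-entry log with log_cur == 5 and !log->full, count becomes 6 and
+ * printing starts at index 0 (oldest first). Once the log has wrapped
+ * (log->full), count is clamped to 32 and printing starts at
+ * (log_cur + 1) % 32, which is the oldest surviving entry.
+ */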
@@ -4200,6 +5434,7 @@ static int __init init_binder_device(const char *name)
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4215,10 +5450,15 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names;
+ char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4263,7 +5503,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
- while ((device_name = strsep(&device_names, ","))) {
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -4277,6 +5518,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+
+ kfree(device_names);
+
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..8fe165844e47
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,1009 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+struct list_lru binder_alloc_lru;
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer->data < buffer->data)
+ p = &parent->rb_left;
+ else if (new_buffer->data > buffer->data)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ void *kern_ptr;
+
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer->data)
+ n = n->rb_left;
+ else if (kern_ptr > buffer->data)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for a buffer that matches the
+ * user data pointer, and mark it as in the process of being freed to
+ * guard against userspace freeing the same buffer twice.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
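+/*
+ * Editor's sketch (illustrative, not part of this patch): a caller
+ * handling a user-space free request would pair this lookup with the
+ * actual free, along the lines of:
+ *
+ *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
+ *	if (buffer == NULL)
+ *		return;  (bad pointer or double free, already logged)
+ *	... release objects referenced by the buffer ...
+ *	binder_alloc_free_buf(&proc->alloc, buffer);
+ */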
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct binder_lru_page *page;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ if (!vma && need_mm)
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (!vma && need_mm) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ bool on_lru;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+
+ trace_binder_alloc_page_end(alloc, index);
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+err_alloc_page_failed:
+err_page_ptr_cleared:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
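+/*
+ * Editor's sketch (illustrative, not part of this patch): callers pass
+ * allocate == 1 to allocate and map the pages backing a new buffer, and
+ * allocate == 0 to push those pages onto the lru for the shrinker:
+ *
+ *	ret = binder_update_page_range(alloc, 1, page_start, page_end, NULL);
+ *	...
+ *	binder_update_page_range(alloc, 0, page_start, page_end, NULL);
+ */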
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ WARN_ON(n && buffer_size != size);
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer;
+
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: size of the array of buffer offsets
+ * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to a
+ * pointer-sized boundary).
+ *
+ * Return: The allocated buffer or an ERR_PTR(-errno) on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
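+/*
+ * Editor's sketch (illustrative, not part of this patch): errors are
+ * reported as ERR_PTR values, so callers must use IS_ERR()/PTR_ERR()
+ * rather than a NULL check:
+ *
+ *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
+ *				      extra_size, is_async);
+ *	if (IS_ERR(buffer))
+ *		return PTR_ERR(buffer);
+ */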
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+}
+
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ bool to_free = true;
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = binder_buffer_prev(buffer);
+ BUG_ON(!prev->free);
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
+ }
+ }
+
+ if (PAGE_ALIGNED(buffer->data)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
+ }
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = binder_buffer_next(buffer);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
+ }
+
+ buffer->data = alloc->buffer;
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_buf_struct_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
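+/*
+ * Editor's sketch (illustrative, not part of this patch): once the
+ * area is mapped, a kernel buffer address converts to its user-space
+ * alias by adding the per-proc offset computed above:
+ *
+ *	user_ptr = (uintptr_t)buffer->data +
+ *		   binder_alloc_get_user_buffer_offset(alloc);
+ */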
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+ struct binder_buffer *buffer;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+ bool on_lru;
+
+ if (!alloc->pages[i].page_ptr)
+ continue;
+
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i].page_ptr);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lru: list_lru instance of the item
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ if (alloc->vma) {
+ mm = get_task_mm(alloc->tsk);
+ if (!mm)
+ goto err_get_task_mm_failed;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(alloc->vma,
+ page_addr + alloc->user_buffer_offset,
+ PAGE_SIZE);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ list_lru_isolate(lru, item);
+
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+ mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+}
+
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
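+/*
+ * Editor's note (illustrative, not part of this patch): after this
+ * registration, the mm shrinker core calls binder_shrink_count() to
+ * size the lru, and binder_shrink_scan() under memory pressure, which
+ * walks binder_alloc_lru and releases pages via binder_alloc_free_page().
+ */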
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 000000000000..a3a3602c689c
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry in alloc->buffers list
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: %true if buffer is free
+ * @allow_user_free: %true if userspace may free this buffer
+ * @async_transaction: %true if buffer is in use for an async transaction
+ * @free_in_progress: %true if a free of this buffer is in progress
+ * @debug_id: unique ID for debugging
+ * @transaction: pointer to associated struct binder_transaction
+ * @target_node: struct binder_node associated with this buffer
+ * @data_size: size of @transaction data
+ * @offsets_size: size of array of buffer offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data: pointer to base of buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex: protects binder_alloc fields
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @tsk: task_struct for the task that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of binder_lru_page
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct binder_lru_page *pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by address.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * An error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
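
The selftest's ordering machinery above is a plain recursive permutation generator: binder_selftest_free_seq() extends seq[] one position at a time, and is_dup() rejects indices already used, so every complete seq[] is one of the BUFFER_NUM! free orders. A minimal user-space sketch of the same enumeration (N and the helper names are illustrative, not taken from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	#define N 5				/* stands in for BUFFER_NUM */

	static bool used_earlier(const int *seq, int depth, int val)
	{
		int i;

		for (i = 0; i < depth; i++)
			if (seq[i] == val)
				return true;
		return false;
	}

	/* Emit all N! orderings; the selftest runs one alloc/free cycle per order. */
	static void gen(int *seq, int depth)
	{
		int i;

		if (depth == N) {
			for (i = 0; i < N; i++)
				printf("%d ", seq[i]);
			printf("\n");
			return;
		}
		for (i = 0; i < N; i++) {
			if (used_earlier(seq, depth, i))
				continue;
			seq[depth] = i;
			gen(seq, depth + 1);
		}
	}

	int main(void)
	{
		int seq[N];

		gen(seq, 0);
		return 0;
	}

With N = 5 this prints 120 sequences, matching the selftest's exhaustive coverage of free orders.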
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -288,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
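
The ten LRU page events above deliberately share one DECLARE_EVENT_CLASS: each DEFINE_EVENT adds only a new event name, while the prototype, recorded fields, and format string all come from binder_lru_page_class, keeping the trace ABI uniform and the header short. A call site then pairs the _start/_end events around the work being timed, roughly like this (a sketch of the pattern, not a quote from binder_alloc.c):

	trace_binder_alloc_page_start(alloc, index);
	page->page_ptr = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page->page_ptr)
		goto err_alloc_page_failed;	/* hypothetical error label */
	trace_binder_alloc_page_end(alloc, index);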
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 363fc5330c21..488c93724220 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,6 +153,16 @@ config AHCI_CEVA
If unsure, say N.
+config AHCI_MTK
+ tristate "MediaTek AHCI SATA support"
+ depends on ARCH_MEDIATEK
+ select MFD_SYSCON
+ help
+ This option enables support for the MediaTek SoC's
+ onboard AHCI SATA controller.
+
+ If unsure, say N.
+
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
depends on ARCH_MVEBU
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index a26ef5a93919..ff9cd2e37458 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DM816) += ahci_dm816.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MTK) += ahci_mtk.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a5fd0b404eb..cb9b0e9090e3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1469,7 +1469,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
return;
dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
- dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+ dev_warn(&pdev->dev,
+ "Switch your BIOS from RAID to AHCI mode to use them.\n");
+
+ /*
+ * Don't rely on the msi-x capability in the remap case,
+ * share the legacy interrupt across ahci and remapped devices.
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_MSI;
}
static int ahci_get_irq_vector(struct ata_host *host, int port)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
+ if (!res) {
+ rc = -ENODEV;
goto disable_resources;
+ }
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
- if (!pwrdn_reg)
+ if (!pwrdn_reg) {
+ rc = -ENOMEM;
goto disable_resources;
+ }
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
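
The two hunks above fix a classic error-path bug: jumping to the cleanup label while rc still held 0 from the earlier successful call would make probe report success after a failure. The kernel idiom is to assign a meaningful errno immediately before every goto, as in this condensed sketch (the example_* helpers are hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;
		int rc;

		rc = example_enable_resources(pdev);	/* hypothetical */
		if (rc)
			return rc;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			rc = -ENODEV;	/* never reach the label with rc == 0 */
			goto disable;
		}

		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base) {
			rc = -ENOMEM;
			goto disable;
		}
		return 0;

	disable:
		example_disable_resources(pdev);	/* hypothetical */
		return rc;
	}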
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
new file mode 100644
index 000000000000..80854f71559a
--- /dev/null
+++ b/drivers/ata/ahci_mtk.c
@@ -0,0 +1,196 @@
+/*
+ * MediaTek AHCI SATA driver
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci"
+
+#define SYS_CFG 0x14
+#define SYS_CFG_SATA_MSK GENMASK(31, 30)
+#define SYS_CFG_SATA_EN BIT(31)
+
+struct mtk_ahci_plat {
+ struct regmap *mode;
+ struct reset_control *axi_rst;
+ struct reset_control *sw_rst;
+ struct reset_control *reg_rst;
+};
+
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ int err;
+
+ /* reset AXI bus and PHY part */
+ plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi");
+ if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->axi_rst);
+
+ plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw");
+ if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->sw_rst);
+
+ plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg");
+ if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->reg_rst);
+
+ err = reset_control_assert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to assert AXI bus\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert AXI bus\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ struct device_node *np = dev->of_node;
+
+ /* enable SATA function if needed */
+ if (of_find_property(np, "mediatek,phy-mode", NULL)) {
+ plat->mode = syscon_regmap_lookup_by_phandle(
+ np, "mediatek,phy-mode");
+ if (IS_ERR(plat->mode)) {
+ dev_err(dev, "missing phy-mode phandle\n");
+ return PTR_ERR(plat->mode);
+ }
+
+ regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK,
+ SYS_CFG_SATA_EN);
+ }
+
+ of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map);
+
+ return 0;
+}
+
+static int mtk_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ahci_plat *plat;
+ struct ahci_host_priv *hpriv;
+ int err;
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->plat_data = plat;
+
+ err = mtk_ahci_parse_property(hpriv, dev);
+ if (err)
+ return err;
+
+ err = mtk_ahci_platform_resets(hpriv, dev);
+ if (err)
+ return err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ &ahci_platform_sht);
+ if (err)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "mediatek,mtk-ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static struct platform_driver mtk_ahci_driver = {
+ .probe = mtk_ahci_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_pm_ops,
+ },
+};
+module_platform_driver(mtk_ahci_driver);
+
+MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 62a04c8fb5c9..99f9a895a459 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -93,6 +93,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
+ .shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index cd2eab6aa92e..a270a1173c8c 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -602,6 +602,40 @@ static void ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
+/**
+ * ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
+ * @pdev: platform device pointer for the host
+ *
+ * This function is called during system shutdown and performs the minimal
+ * deconfiguration required to ensure that an ahci_platform host cannot
+ * corrupt or otherwise interfere with a new kernel being started with kexec.
+ */
+void ahci_platform_shutdown(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ /* Disable port interrupts */
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ /* Stop the port DMA engines */
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+
+ /* Disable and clear host interrupts */
+ writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_shutdown);
+
#ifdef CONFIG_PM_SLEEP
/**
* ahci_platform_suspend_host - Suspend an ahci-platform host
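
The bare readl() after the HOST_CTL write in ahci_platform_shutdown() is the standard posted-write flush: MMIO writes may be buffered along the bus, but a read from the same device cannot complete until the earlier write has, so the controller is guaranteed to have its interrupts disabled before shutdown proceeds. The idiom in isolation (a sketch using the registers from the function above):

	u32 ctl;

	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;		/* clear global IRQ enable */
	writel(ctl, mmio + HOST_CTL);
	(void)readl(mmio + HOST_CTL);	/* read back to flush the posted write */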
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa7dd4394c02..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
+ if (!ata_id_has_trusted(dev->id))
+ return;
+
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbd05532c09..e4effef0c83f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -645,12 +645,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
* completions are honored. A scmd is determined to have
* timed out iff its associated qc is active and not failed.
*/
+ spin_lock_irqsave(ap->lock, flags);
if (ap->ops->error_handler) {
struct scsi_cmnd *scmd, *tmp;
int nr_timedout = 0;
- spin_lock_irqsave(ap->lock, flags);
-
/* This must occur under the ap->lock as we don't want
a polled recovery to race the real interrupt handler
@@ -700,12 +699,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
if (nr_timedout)
__ata_port_freeze(ap);
- spin_unlock_irqrestore(ap->lock, flags);
/* initialize eh_tries */
ap->eh_tries = ATA_EH_MAX_TRIES;
- } else
- spin_unlock_wait(ap->lock);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
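
The locking rework above is part of the tree-wide removal of spin_unlock_wait(): waiting for a lock to become free without taking it turned out to have unfixable memory-ordering semantics, so callers were converted to a genuine acquire/release pair. Here that is done by hoisting the lock so both branches run under it; the degenerate form of the conversion looks like this (schematic only):

	/* before: synchronize with the holder, but with weak ordering */
	spin_unlock_wait(ap->lock);

	/* after: actually take and drop the lock, which both waits for
	 * the current holder and provides full acquire/release ordering
	 */
	spin_lock_irqsave(ap->lock, flags);
	spin_unlock_irqrestore(ap->lock, flags);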
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 8a01d09ac4db..23a62e4015d0 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -34,7 +34,7 @@ struct zpodd {
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
- const char cdb[] = { GPCMD_START_STOP_UNIT,
+ static const char cdb[] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct ata_taskfile tf;
- char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
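
Adding static const to the fixed CDB templates moves them from per-call stack initialization into .rodata: without static, the compiler must materialize the initializer on the stack each time the function runs. A sketch of the difference (send_cdb() is a hypothetical consumer):

	void send_cdb(const char *cdb);

	void f_stack(void)
	{
		/* rebuilt on the stack at every call */
		const char cdb[] = { 0x1b, 0, 0, 0, 0x02, 0 };

		send_cdb(cdb);
	}

	void f_rodata(void)
	{
		/* one shared read-only copy for all calls */
		static const char cdb[] = { 0x1b, 0, 0, 0, 0x02, 0 };

		send_cdb(cdb);
	}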
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1ba03d6df951..d3d851b014a3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -840,7 +840,6 @@ static int octeon_cf_probe(struct platform_device *pdev)
struct property *reg_prop;
int n_addr, n_size, reg_len;
struct device_node *node;
- const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
@@ -850,7 +849,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
void __iomem *base;
struct octeon_cf_port *cf_port;
int rv = -ENOMEM;
-
+ u32 bus_width;
node = pdev->dev.of_node;
if (node == NULL)
@@ -860,11 +859,10 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!cf_port)
return -ENOMEM;
- cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
+ cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
- prop = of_get_property(node, "cavium,bus-width", NULL);
- if (prop)
- is_16bit = (be32_to_cpup(prop) == 16);
+ if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
+ is_16bit = (bus_width == 16);
else
is_16bit = false;
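
Both conversions above swap open-coded property access for the typed OF accessors, which fold the presence check and endianness conversion into one call. The same pattern in isolation (property names as in the hunk):

	u32 bus_width;
	bool is_16bit = false;
	bool true_ide;

	/* true when the property exists, false otherwise */
	true_ide = of_property_read_bool(node, "cavium,true-ide");

	/* returns 0 on success; bus_width is untouched on failure */
	if (!of_property_read_u32(node, "cavium,bus-width", &bus_width))
		is_16bit = (bus_width == 16);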
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 8c704523bae7..46950e0267e0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/pinctrl/consumer.h>
#include "sata_gemini.h"
#define DRV_NAME "gemini_sata_bridge"
@@ -43,17 +44,6 @@ struct sata_gemini {
struct clk *sata1_pclk;
};
-/* Global IDE PAD Skew Control Register */
-#define GEMINI_GLOBAL_IDE_SKEW_CTRL 0x18
-#define GEMINI_IDE1_HOST_STROBE_DELAY_SHIFT 28
-#define GEMINI_IDE1_DEVICE_STROBE_DELAY_SHIFT 24
-#define GEMINI_IDE1_OUTPUT_IO_SKEW_SHIFT 20
-#define GEMINI_IDE1_INPUT_IO_SKEW_SHIFT 16
-#define GEMINI_IDE0_HOST_STROBE_DELAY_SHIFT 12
-#define GEMINI_IDE0_DEVICE_STROBE_DELAY_SHIFT 8
-#define GEMINI_IDE0_OUTPUT_IO_SKEW_SHIFT 4
-#define GEMINI_IDE0_INPUT_IO_SKEW_SHIFT 0
-
/* Miscellaneous Control Register */
#define GEMINI_GLOBAL_MISC_CTRL 0x30
/*
@@ -91,8 +81,6 @@ struct sata_gemini {
#define GEMINI_IDE_IOMUX_MODE2 (2 << 24)
#define GEMINI_IDE_IOMUX_MODE3 (3 << 24)
#define GEMINI_IDE_IOMUX_SHIFT (24)
-#define GEMINI_IDE_PADS_ENABLE BIT(4)
-#define GEMINI_PFLASH_PADS_DISABLE BIT(1)
/*
* Registers directly controlling the PATA<->SATA adapters
@@ -274,14 +262,14 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get(dev, "sata0");
+ sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
if (IS_ERR(sg->sata0_reset)) {
dev_err(dev, "no SATA0 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
clk_disable_unprepare(sg->sata0_pclk);
return PTR_ERR(sg->sata0_reset);
}
- sg->sata1_reset = devm_reset_control_get(dev, "sata1");
+ sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
if (IS_ERR(sg->sata1_reset)) {
dev_err(dev, "no SATA1 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
@@ -300,17 +288,39 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return 0;
}
+static int gemini_setup_ide_pins(struct device *dev)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *ide_state;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ ide_state = pinctrl_lookup_state(p, "ide");
+ if (IS_ERR(ide_state))
+ return PTR_ERR(ide_state);
+
+ ret = pinctrl_select_state(p, ide_state);
+ if (ret) {
+ dev_err(dev, "could not select IDE state\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int gemini_sata_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sata_gemini *sg;
- static struct regmap *map;
+ struct regmap *map;
struct resource *res;
enum gemini_muxmode muxmode;
u32 gmode;
u32 gmask;
- u32 val;
int ret;
sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
@@ -362,16 +372,6 @@ static int gemini_sata_probe(struct platform_device *pdev)
gmask = GEMINI_IDE_IOMUX_MASK;
gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
- /*
- * If we mux out the IDE, parallel flash must be disabled.
- * SATA0 and SATA1 have dedicated pins and may coexist with
- * parallel flash.
- */
- if (sg->ide_pins)
- gmode |= GEMINI_IDE_PADS_ENABLE | GEMINI_PFLASH_PADS_DISABLE;
- else
- gmask |= GEMINI_IDE_PADS_ENABLE;
-
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
if (ret) {
dev_err(dev, "unable to set up IDE muxing\n");
@@ -379,14 +379,15 @@ static int gemini_sata_probe(struct platform_device *pdev)
goto out_unprep_clk;
}
- /* FIXME: add more elaborate IDE skew control handling */
+ /*
+ * Route out the IDE pins if desired.
+ * This is done by selecting a pin control state named
+ * "ide" that muxes the IDE pins out on the pads.
+ */
if (sg->ide_pins) {
- ret = regmap_read(map, GEMINI_GLOBAL_IDE_SKEW_CTRL, &val);
- if (ret) {
- dev_err(dev, "cannot read IDE skew control register\n");
+ ret = gemini_setup_ide_pins(dev);
+ if (ret)
return ret;
- }
- dev_info(dev, "IDE skew control: %08x\n", val);
}
dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 0fd6ac7e57ba..a9d692c6c182 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -339,7 +339,7 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
if (!reg)
continue;
if (index == *reg) {
- seq_printf(m, "devspec: %s\n", np->full_name);
+ seq_printf(m, "devspec: %pOF\n", np);
break;
}
}
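
%pOF is the printk format extension that prints a struct device_node by its full device-tree path; it replaces direct reads of np->full_name, whose backing string was in the process of being removed from struct device_node. For instance, the line above would emit something like (path is illustrative):

	/* e.g. "devspec: /pci@f2000000/sata@4" */
	seq_printf(m, "devspec: %pOF\n", np);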
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 1fd25e872ece..8d98130ecd40 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = {
NULL
};
-static struct attribute_group adummy_group_attrs = {
+static const struct attribute_group adummy_group_attrs = {
.name = NULL, /* We want them in dev's root folder */
.attrs = adummy_attrs
};
@@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
return 0;
}
-static struct atmdev_ops adummy_ops =
+static const struct atmdev_ops adummy_ops =
{
.open = adummy_open,
.close = adummy_close,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 906705e5f776..acf16c323e38 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
/********** module entry **********/
-static struct pci_device_id amb_pci_tbl[] = {
+static const struct pci_device_id amb_pci_tbl[] = {
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
{ 0, }
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 56fa16c85ebf..afebeb1c3e1e 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = {
*/
-static struct atmdev_ops atmtcp_c_dev_ops = {
+static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
.send = atmtcp_c_send
};
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b042ec458544..ce47eb17901d 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2292,7 +2292,7 @@ err_disable:
}
-static struct pci_device_id eni_pci_tbl[] = {
+static const struct pci_device_id eni_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ },
{ 0, }
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 22dcab952a24..6b6368a56526 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
func_exit ();
}
-static struct pci_device_id firestream_pci_tbl[] = {
+static const struct pci_device_id firestream_pci_tbl[] = {
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
{ 0, }
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f0433adcd8fc..f8b7e86907cc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
}
-static struct pci_device_id fore200e_pca_tbl[] = {
+static const struct pci_device_id fore200e_pca_tbl[] = {
{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) &fore200e_bus[0] },
{ 0, }
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 37ee21c5a5ca..e58538c29377 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -161,7 +161,7 @@ static unsigned int clocktab[] = {
CLK_LOW
};
-static struct atmdev_ops he_ops =
+static const struct atmdev_ops he_ops =
{
.open = he_open,
.close = he_close,
@@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
-static struct pci_device_id he_pci_tbl[] = {
+static const struct pci_device_id he_pci_tbl[] = {
{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
{ 0, }
};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 0f18480b33b5..7e76b35f422c 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-static struct pci_device_id hrz_pci_tbl[] = {
+static const struct pci_device_id hrz_pci_tbl[] = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0 },
{ 0, }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 60bacba03d17..47f3c4ae0594 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
static void idt77252_softint(struct work_struct *work);
-static struct atmdev_ops idt77252_ops =
+static const struct atmdev_ops idt77252_ops =
{
.dev_close = idt77252_dev_close,
.open = idt77252_open,
@@ -3725,7 +3725,7 @@ err_out_disable_pdev:
return err;
}
-static struct pci_device_id idt77252_pci_tbl[] =
+static const struct pci_device_id idt77252_pci_tbl[] =
{
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 },
{ 0, }
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index a4fa6c82261e..fc72b763fdd7 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev)
kfree(iadev);
}
-static struct pci_device_id ia_pci_tbl[] = {
+static const struct pci_device_id ia_pci_tbl[] = {
{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 1a9bc51284b0..2351dad78ff5 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci,
return result;
}
-static struct pci_device_id lanai_pci_tbl[] = {
+static const struct pci_device_id lanai_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) },
{ 0, } /* terminal entry */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index d879f3bca107..a9702836cbae 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;
-static struct atmdev_ops atm_ops = {
+static const struct atmdev_ops atm_ops = {
.open = ns_open,
.close = ns_close,
.ioctl = ns_ioctl,
@@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
kfree(card);
}
-static struct pci_device_id nicstar_pci_tbl[] = {
+static const struct pci_device_id nicstar_pci_tbl[] = {
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
{0,} /* terminate list */
};
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c8f2ca6d8b29..0df1a1c80b00 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = {
NULL
};
-static struct attribute_group solos_attr_group = {
+static const struct attribute_group solos_attr_group = {
.attrs = solos_attrs,
.name = "parameters",
};
@@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = {
NULL
};
-static struct attribute_group gpio_attr_group = {
+static const struct attribute_group gpio_attr_group = {
.attrs = gpio_attrs,
.name = "gpio",
};
@@ -1187,7 +1187,7 @@ static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
return 0;
}
-static struct atmdev_ops fpga_ops = {
+static const struct atmdev_ops fpga_ops = {
.open = popen,
.close = pclose,
.ioctl = NULL,
@@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev)
kfree(card);
}
-static struct pci_device_id fpga_pci_tbl[] = {
+static const struct pci_device_id fpga_pci_tbl[] = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 07bdd51b3b9a..1ef67db03c8e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1642,7 +1642,7 @@ out_free:
MODULE_LICENSE("GPL");
-static struct pci_device_id zatm_pci_tbl[] = {
+static const struct pci_device_id zatm_pci_tbl[] = {
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
{ 0, }
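
Every atm change above is the same mechanical constification: ops tables and PCI ID tables that are only ever read after initialization gain const, so the linker places them in .rodata, where a stray write faults instead of silently redirecting a function pointer. The shape of the change (sketch with hypothetical callbacks):

	/* before: writable function-pointer table in .data */
	static struct atmdev_ops ops = {
		.open  = my_open,
		.close = my_close,
	};

	/* after: read-only table in .rodata */
	static const struct atmdev_ops ops = {
		.open  = my_open,
		.close = my_close,
	};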
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 7a8b8fb2f572..df126dcdaf18 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -877,21 +877,21 @@ static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
spin_unlock_irq(&pprt_lock);
}
-static struct charlcd_ops charlcd_serial_ops = {
+static const struct charlcd_ops charlcd_serial_ops = {
.write_cmd = lcd_write_cmd_s,
.write_data = lcd_write_data_s,
.clear_fast = lcd_clear_fast_s,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_parallel_ops = {
+static const struct charlcd_ops charlcd_parallel_ops = {
.write_cmd = lcd_write_cmd_p8,
.write_data = lcd_write_data_p8,
.clear_fast = lcd_clear_fast_p8,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_tilcd_ops = {
+static const struct charlcd_ops charlcd_tilcd_ops = {
.write_cmd = lcd_write_cmd_tilcd,
.write_data = lcd_write_data_tilcd,
.clear_fast = lcd_clear_fast_tilcd,
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index f046d21de57d..1a5f6a157a57 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -140,13 +140,10 @@ config EXTRA_FIRMWARE
config EXTRA_FIRMWARE_DIR
string "Firmware blobs root directory"
depends on EXTRA_FIRMWARE != ""
- default "firmware"
+ default "/lib/firmware"
help
This option controls the directory in which the kernel build system
looks for the firmware files listed in the EXTRA_FIRMWARE option.
- The default is firmware/ in the kernel source tree, but by changing
- this option you can point it elsewhere, such as /lib/firmware/ or
- some other directory containing the firmware files.
config FW_LOADER_USER_HELPER
bool
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index d1c33a85059e..41be9ff7d70a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n",
- topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
@@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static bool cap_parsing_failed;
+
+static int __init free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+ raw_capacity = NULL;
+
+ return 0;
+}
void topology_normalize_cpu_scale(void)
{
u64 capacity;
int cpu;
- if (!raw_capacity || cap_parsing_failed)
+ if (!raw_capacity)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
@@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void)
mutex_unlock(&cpu_scale_mutex);
}
-int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
- int ret = 1;
+ static bool cap_parsing_failed;
+ int ret;
u32 cpu_capacity;
if (cap_parsing_failed)
- return !ret;
+ return false;
- ret = of_property_read_u32(cpu_node,
- "capacity-dmips-mhz",
+ ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
@@ -139,21 +145,21 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
if (!raw_capacity) {
pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
- return 0;
+ return false;
}
}
capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
- pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
- cpu_node->full_name, raw_capacity[cpu]);
+ pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+ cpu_node, raw_capacity[cpu]);
} else {
if (raw_capacity) {
- pr_err("cpu_capacity: missing %s raw capacity\n",
- cpu_node->full_name);
+ pr_err("cpu_capacity: missing %pOF raw capacity\n",
+ cpu_node);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
- kfree(raw_capacity);
+ free_raw_capacity();
}
return !ret;
@@ -161,7 +167,6 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
@@ -173,30 +178,31 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (cap_parsing_failed || cap_parsing_done)
+ if (!raw_capacity)
return 0;
- switch (val) {
- case CPUFREQ_NOTIFY:
- pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
- cpumask_pr_args(policy->related_cpus),
- cpumask_pr_args(cpus_to_visit));
- cpumask_andnot(cpus_to_visit,
- cpus_to_visit,
- policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
- if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- kfree(raw_capacity);
- pr_debug("cpu_capacity: parsing done\n");
- cap_parsing_done = true;
- schedule_work(&parsing_done_work);
- }
+ if (val != CPUFREQ_NOTIFY)
+ return 0;
+
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+
+ cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ policy->cpuinfo.max_freq / 1000UL;
+ capacity_scale = max(raw_capacity[cpu], capacity_scale);
+ }
+
+ if (cpumask_empty(cpus_to_visit)) {
+ topology_normalize_cpu_scale();
+ free_raw_capacity();
+ pr_debug("cpu_capacity: parsing done\n");
+ schedule_work(&parsing_done_work);
}
+
return 0;
}
@@ -233,11 +239,5 @@ static void parsing_done_workfn(struct work_struct *work)
}
#else
-static int __init free_raw_capacity(void)
-{
- kfree(raw_capacity);
-
- return 0;
-}
core_initcall(free_raw_capacity);
#endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e19b1008e5fb..539432a14b5c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv,
extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
-extern int device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e162c9a789ba..22a64fd3309b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 755451f684bc..12ebd055724c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+ const struct attribute_group *group;
+ const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+ return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group *group = devres->group;
+
+ dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+ sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group **groups = devres->groups;
+
+ dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+ sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev: The device to create the group for
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_group_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_group(&dev->kobj, grp);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->group = grp;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
+
+/**
+ * devm_device_remove_group - remove a managed group from a device
+ * @dev: device to remove the group from
+ * @grp: group to remove
+ *
+ * This function removes a group of attributes from a device. The attributes
+ * must previously have been created for this group, otherwise it will fail.
+ */
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ WARN_ON(devres_release(dev, devm_attr_group_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev: The device to create the group for
+ * @groups: The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups. If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called. It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_groups_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_groups(&dev->kobj, groups);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->groups = groups;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev: The device for the groups to be removed from
+ * @groups: NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ WARN_ON(devres_release(dev, devm_attr_groups_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
static int device_add_attrs(struct device *dev)
{
@@ -2664,11 +2796,12 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->class && dev->class->shutdown) {
+ if (dev->class && dev->class->shutdown_pre) {
if (initcall_debug)
- dev_info(dev, "shutdown\n");
- dev->class->shutdown(dev);
- } else if (dev->bus && dev->bus->shutdown) {
+ dev_info(dev, "shutdown_pre\n");
+ dev->class->shutdown_pre(dev);
+ }
+ if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
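
With the devres-managed group helpers now exported, a driver can drop its remove-side sysfs teardown entirely: the group is unregistered automatically when the device is unbound, in reverse devres order. A hypothetical probe using the new API (the foo_* names are illustrative):

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* torn down automatically on unbind; no matching remove call */
		ret = devm_device_add_group(&pdev->dev, &foo_attr_group);
		if (ret)
			return ret;

		return foo_hw_init(pdev);	/* hypothetical */
	}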
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2c3b359b3536..321cd7b4d817 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev,
buf[n++] = ',';
if (nr_cpu_ids == total_cpus-1)
- n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+ n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
else
- n += snprintf(&buf[n], len - n, "%d-%d",
+ n += snprintf(&buf[n], len - n, "%u-%d",
nr_cpu_ids, total_cpus-1);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 4882f06d12df..ad44b40fe284 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static bool initcalls_done;
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
@@ -62,6 +64,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool defer_all_probes;
/*
+ * For initcall_debug, show the deferred probes executed in late_initcall
+ * processing.
+ */
+static void deferred_probe_debug(struct device *dev)
+{
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+
+ printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
+ task_pid_nr(current));
+ calltime = ktime_get();
+ bus_probe_device(dev);
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
+ dev_name(dev), duration);
+}
+
+/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_unlock();
dev_dbg(dev, "Retrying from deferred list\n");
- bus_probe_device(dev);
+ if (initcall_debug && !initcalls_done)
+ deferred_probe_debug(dev);
+ else
+ bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
@@ -215,6 +240,7 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
+ initcalls_done = true;
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -259,6 +285,8 @@ static void driver_bound(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_BIND);
}
static int driver_sysfs_add(struct device *dev)
@@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_UNBOUND_DRIVER,
dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1c152aed6b82..a39b2166b145 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -37,7 +37,7 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
return mem->device_base;
}
-static bool dma_init_coherent_memory(
+static int dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
struct dma_coherent_mem **mem)
{
@@ -45,25 +45,28 @@ static bool dma_init_coherent_memory(
void __iomem *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+ int ret;
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
+ if (!size) {
+ ret = -EINVAL;
goto out;
+ }
- if (flags & DMA_MEMORY_MAP)
- mem_base = memremap(phys_addr, size, MEMREMAP_WC);
- else
- mem_base = ioremap(phys_addr, size);
- if (!mem_base)
+ mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+ if (!mem_base) {
+ ret = -EINVAL;
goto out;
-
+ }
dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dma_mem)
+ if (!dma_mem) {
+ ret = -ENOMEM;
goto out;
+ }
dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dma_mem->bitmap)
+ if (!dma_mem->bitmap) {
+ ret = -ENOMEM;
goto out;
+ }
dma_mem->virt_base = mem_base;
dma_mem->device_base = device_addr;
@@ -73,17 +76,13 @@ static bool dma_init_coherent_memory(
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
- return true;
+ return 0;
out:
kfree(dma_mem);
- if (mem_base) {
- if (flags & DMA_MEMORY_MAP)
- memunmap(mem_base);
- else
- iounmap(mem_base);
- }
- return false;
+ if (mem_base)
+ memunmap(mem_base);
+ return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
@@ -91,10 +90,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
if (!mem)
return;
- if (mem->flags & DMA_MEMORY_MAP)
- memunmap(mem->virt_base);
- else
- iounmap(mem->virt_base);
+ memunmap(mem->virt_base);
kfree(mem->bitmap);
kfree(mem);
}
@@ -109,8 +105,6 @@ static int dma_assign_coherent_memory(struct device *dev,
return -EBUSY;
dev->dma_mem = mem;
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
return 0;
}
@@ -118,16 +112,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
struct dma_coherent_mem *mem;
+ int ret;
- if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
- &mem))
- return 0;
-
- if (dma_assign_coherent_memory(dev, mem) == 0)
- return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
+ ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+ if (ret)
+ return ret;
- dma_release_coherent_memory(mem);
- return 0;
+ ret = dma_assign_coherent_memory(dev, mem);
+ if (ret)
+ dma_release_coherent_memory(mem);
+ return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
@@ -171,7 +165,6 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
int order = get_order(size);
unsigned long flags;
int pageno;
- int dma_memory_map;
void *ret;
spin_lock_irqsave(&mem->spinlock, flags);
@@ -188,15 +181,9 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT);
- dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
- if (dma_memory_map)
- memset(ret, 0, size);
- else
- memset_io(ret, 0, size);
-
+ memset(ret, 0, size);
return ret;
-
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
return NULL;
@@ -359,14 +346,18 @@ static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
struct dma_coherent_mem *mem = rmem->priv;
+ int ret;
- if (!mem &&
- !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
- &mem)) {
+ if (!mem)
+ return -ENODEV;
+
+ ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+ DMA_MEMORY_EXCLUSIVE, &mem);
+
+ if (ret) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
- return -ENODEV;
+ return ret;
}
mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index b555ff9dd8fc..e584eddef0a7 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -176,13 +176,10 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
flags);
- if (rc) {
+ if (!rc)
devres_add(dev, res);
- rc = 0;
- } else {
+ else
devres_free(res);
- rc = -ENOMEM;
- }
return rc;
}
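
Both hunks track dma_declare_coherent_memory()'s switch from returning the DMA_MEMORY_MAP/DMA_MEMORY_IO flags (with zero meaning failure) to the conventional 0/-errno. Call sites invert their tests accordingly; schematically:

	/* old convention: non-zero flag value meant success */
	if (!dma_declare_coherent_memory(dev, phys, dma_addr, size, flags))
		return -ENOMEM;

	/* new convention: 0 on success, negative errno on failure */
	ret = dma_declare_coherent_memory(dev, phys, dma_addr, size, flags);
	if (ret)
		return ret;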
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bfbe1e154128..4b57cf5bc81d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -7,6 +7,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -256,38 +258,6 @@ static int fw_cache_piggyback_on_request(const char *name);
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);
-static bool __enable_firmware = false;
-
-static void enable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = true;
- mutex_unlock(&fw_lock);
-}
-
-static void disable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = false;
- mutex_unlock(&fw_lock);
-}
-
-/*
- * When disabled only the built-in firmware and the firmware cache will be
- * used to look for firmware.
- */
-static bool firmware_enabled(void)
-{
- bool enabled = false;
-
- mutex_lock(&fw_lock);
- if (__enable_firmware)
- enabled = true;
- mutex_unlock(&fw_lock);
-
- return enabled;
-}
-
static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
@@ -331,6 +301,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
return NULL;
}
+/* Returns 1 for batching firmware requests with the same name */
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
struct firmware_buf **buf, void *dbuf,
@@ -344,6 +315,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
kref_get(&tmp->ref);
spin_unlock(&fwc->lock);
*buf = tmp;
+ pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
return 1;
}
tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
@@ -1085,9 +1057,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
mutex_unlock(&fw_lock);
}
- if (fw_state_is_aborted(&buf->fw_st))
- retval = -EAGAIN;
- else if (buf->is_paged_buf && !buf->data)
+ if (fw_state_is_aborted(&buf->fw_st)) {
+ if (retval == -ERESTARTSYS)
+ retval = -EINTR;
+ else
+ retval = -EAGAIN;
+ } else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1239,12 +1214,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
- if (!firmware_enabled()) {
- WARN(1, "firmware request while host is not available\n");
- ret = -EHOSTDOWN;
- goto out;
- }
-
ret = fw_get_filesystem_firmware(device, fw->priv);
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
@@ -1755,62 +1724,6 @@ static void device_uncache_fw_images_delay(unsigned long delay)
msecs_to_jiffies(delay));
}
-/**
- * fw_pm_notify - notifier for suspend/resume
- * @notify_block: unused
- * @mode: mode we are switching to
- * @unused: unused
- *
- * Used to modify the firmware_class state as we move in between states.
- * The firmware_class implements a firmware cache to enable device driver
- * to fetch firmware upon resume before the root filesystem is ready. We
- * disable API calls which do not use the built-in firmware or the firmware
- * cache when we know these calls will not work.
- *
- * The inner logic behind all this is a bit complex so it is worth summarizing
- * the kernel's own suspend/resume process with context and focus on how this
- * can impact the firmware API.
- *
- * First a review on how we go to suspend::
- *
- * pm_suspend() --> enter_state() -->
- * sys_sync()
- * suspend_prepare() -->
- * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
- * suspend_freeze_processes() -->
- * freeze_processes() -->
- * __usermodehelper_set_disable_depth(UMH_DISABLED);
- * freeze all tasks ...
- * freeze_kernel_threads()
- * suspend_devices_and_enter() -->
- * dpm_suspend_start() -->
- * dpm_prepare()
- * dpm_suspend()
- * suspend_enter() -->
- * platform_suspend_prepare()
- * dpm_suspend_late()
- * freeze_enter()
- * syscore_suspend()
- *
- * When we resume we bail out of a loop from suspend_devices_and_enter() and
- * unwind back out to the caller enter_state() where we were before as follows::
- *
- * enter_state() -->
- * suspend_devices_and_enter() --> (bail from loop)
- * dpm_resume_end() -->
- * dpm_resume()
- * dpm_complete()
- * suspend_finish() -->
- * suspend_thaw_processes() -->
- * thaw_processes() -->
- * __usermodehelper_set_disable_depth(UMH_FREEZING);
- * thaw_workqueues();
- * thaw all processes ...
- * usermodehelper_enable();
- * pm_notifier_call_chain(PM_POST_SUSPEND);
- *
- * fw_pm_notify() works through pm_notifier_call_chain().
- */
static int fw_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
@@ -1824,7 +1737,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
*/
kill_pending_fw_fallback_reqs(true);
device_cache_fw_images();
- disable_firmware();
break;
case PM_POST_SUSPEND:
@@ -1837,7 +1749,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
mutex_lock(&fw_lock);
fw_cache.state = FW_LOADER_NO_CACHE;
mutex_unlock(&fw_lock);
- enable_firmware();
device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
break;
@@ -1886,7 +1797,6 @@ static void __init fw_cache_init(void)
static int fw_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
- disable_firmware();
/*
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
@@ -1902,7 +1812,6 @@ static struct notifier_block fw_shutdown_nb = {
static int __init firmware_class_init(void)
{
- enable_firmware();
fw_cache_init();
register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -1914,7 +1823,6 @@ static int __init firmware_class_init(void)
static void __exit firmware_class_exit(void)
{
- disable_firmware();
#ifdef CONFIG_PM_SLEEP
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
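
With the enable/disable gate removed, callers no longer see -EHOSTDOWN; an
aborted fallback load now surfaces as -EINTR when it was killed by a signal and
-EAGAIN otherwise. A hedged sketch of a caller handling these conventions; the
firmware name and the my_load() wrapper are illustrative:

	static int my_load(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "my-fw.bin", dev);
		if (err == -EINTR || err == -EAGAIN)
			return err;	/* load was aborted; caller may retry */
		if (err)
			return err;	/* not found or other failure */

		/* ... consume fw->data / fw->size ... */

		release_firmware(fw);
		return 0;
	}
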
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c7c4e0325cdb..4e3b61cda520 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -388,6 +388,19 @@ static ssize_t show_phys_device(struct device *dev,
}
#ifdef CONFIG_MEMORY_HOTREMOVE
+static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
+ unsigned long nr_pages, int online_type,
+ struct zone *default_zone)
+{
+ struct zone *zone;
+
+ zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ if (zone != default_zone) {
+ strcat(buf, " ");
+ strcat(buf, zone->name);
+ }
+}
+
static ssize_t show_valid_zones(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -395,7 +408,7 @@ static ssize_t show_valid_zones(struct device *dev,
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long valid_start_pfn, valid_end_pfn;
- bool append = false;
+ struct zone *default_zone;
int nid;
/*
@@ -418,16 +431,13 @@ static ssize_t show_valid_zones(struct device *dev,
}
nid = pfn_to_nid(start_pfn);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) {
- strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name);
- append = true;
- }
+ default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ strcat(buf, default_zone->name);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) {
- if (append)
- strcat(buf, " ");
- strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name);
- }
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
+ default_zone);
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
+ default_zone);
out:
strcat(buf, "\n");
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d8dc83017d8d..3855902f2c5b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -160,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev,
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
- sum_zone_node_page_state(dev->id, NUMA_HIT),
- sum_zone_node_page_state(dev->id, NUMA_MISS),
- sum_zone_node_page_state(dev->id, NUMA_FOREIGN),
- sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
- sum_zone_node_page_state(dev->id, NUMA_LOCAL),
- sum_zone_node_page_state(dev->id, NUMA_OTHER));
+ sum_zone_numa_state(dev->id, NUMA_HIT),
+ sum_zone_numa_state(dev->id, NUMA_MISS),
+ sum_zone_numa_state(dev->id, NUMA_FOREIGN),
+ sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_numa_state(dev->id, NUMA_LOCAL),
+ sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
@@ -181,9 +181,17 @@ static ssize_t node_read_vmstat(struct device *dev,
n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
sum_zone_node_page_state(nid, i));
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+#ifdef CONFIG_NUMA
+ for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+ sum_zone_numa_state(nid, i));
+#endif
+
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+ n += sprintf(buf+n, "%s %lu\n",
+ vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
+ NR_VM_NUMA_STAT_ITEMS],
node_page_state(pgdat, i));
return n;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 60303aa28587..e8ca5e2cf1e5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ ktime_t delta, now;
+
+ now = ktime_get();
+ delta = ktime_sub(now, genpd->accounting_time);
+
+	/*
+	 * If genpd->status is active, the domain has just come out of
+	 * the off state, so charge the elapsed time to the idle time
+	 * of the state that was left; otherwise charge it to on_time.
+	 */
+ if (genpd->status == GPD_STATE_ACTIVE) {
+ int state_idx = genpd->state_idx;
+
+ genpd->states[state_idx].idle_time =
+ ktime_add(genpd->states[state_idx].idle_time, delta);
+ } else {
+ genpd->on_time = ktime_add(genpd->on_time, delta);
+ }
+
+ genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
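
The accounting idiom above, in isolation: on every state change, charge the
elapsed time to the state that was just left, then restart the clock. It relies
on being called after the status field has been updated, which is how the
genpd_power_on()/genpd_power_off() hunks below use it. struct my_dev and its
fields are hypothetical:

	struct my_dev {
		bool on;		/* current state, already updated */
		ktime_t on_time;
		ktime_t off_time;
		ktime_t last_change;
	};

	static void my_account(struct my_dev *d)
	{
		ktime_t now = ktime_get();
		ktime_t delta = ktime_sub(now, d->last_change);

		if (d->on)
			d->off_time = ktime_add(d->off_time, delta); /* time spent off */
		else
			d->on_time = ktime_add(d->on_time, delta);   /* time spent on */

		d->last_change = now;
	}
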
@@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd_update_accounting(genpd);
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
goto err;
genpd->status = GPD_STATE_ACTIVE;
+ genpd_update_accounting(genpd);
+
return 0;
err:
@@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->provider = NULL;
genpd->has_provider = false;
+ genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
@@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
mutex_unlock(&of_genpd_mutex);
- pr_debug("Added domain provider from %s\n", np->full_name);
+ pr_debug("Added domain provider from %pOF\n", np);
return 0;
}
@@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
- pr_debug(" * %s missing entry-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
- pr_debug(" * %s missing exit-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
return -EINVAL;
}
@@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn,
ret = genpd_parse_state(&st[i++], np);
if (ret) {
pr_err
- ("Parsing idle state node %s failed with err %d\n",
- np->full_name, ret);
+ ("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
of_node_put(np);
kfree(st);
return ret;
@@ -2327,7 +2359,7 @@ exit:
return 0;
}
-static int pm_genpd_summary_show(struct seq_file *s, void *data)
+static int genpd_summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+static int genpd_status_show(struct seq_file *s, void *data)
{
- return single_open(file, pm_genpd_summary_show, NULL);
+ static const char * const status_lookup[] = {
+ [GPD_STATE_ACTIVE] = "on",
+ [GPD_STATE_POWER_OFF] = "off"
+ };
+
+ struct generic_pm_domain *genpd = s->private;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+ genpd->state_idx);
+ else
+ seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+ genpd_unlock(genpd);
+ return ret;
}
-static const struct file_operations pm_genpd_summary_fops = {
- .open = pm_genpd_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int genpd_sub_domains_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct gpd_link *link;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(link, &genpd->master_links, master_node)
+ seq_printf(s, "%s\n", link->slave->name);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_idle_states_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ seq_puts(s, "State Time Spent(ms)\n");
+
+ for (i = 0; i < genpd->state_count; i++) {
+ ktime_t delta = 0;
+ s64 msecs;
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ msecs = ktime_to_ms(
+ ktime_add(genpd->states[i].idle_time, delta));
+ seq_printf(s, "S%-13i %lld\n", i, msecs);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_active_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (genpd->status == GPD_STATE_ACTIVE)
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(
+ ktime_add(genpd->on_time, delta)));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0, total = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (i = 0; i < genpd->state_count; i++) {
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ total = ktime_add(total, genpd->states[i].idle_time);
+ }
+ total = ktime_add(total, delta);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_devices_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "%s\n", kobj_path);
+ kfree(kobj_path);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+#define define_genpd_open_function(name) \
+static int genpd_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, genpd_##name##_show, inode->i_private); \
+}
+
+define_genpd_open_function(summary);
+define_genpd_open_function(status);
+define_genpd_open_function(sub_domains);
+define_genpd_open_function(idle_states);
+define_genpd_open_function(active_time);
+define_genpd_open_function(total_idle_time);
+define_genpd_open_function(devices);
+
+#define define_genpd_debugfs_fops(name) \
+static const struct file_operations genpd_##name##_fops = { \
+ .open = genpd_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+define_genpd_debugfs_fops(summary);
+define_genpd_debugfs_fops(status);
+define_genpd_debugfs_fops(sub_domains);
+define_genpd_debugfs_fops(idle_states);
+define_genpd_debugfs_fops(active_time);
+define_genpd_debugfs_fops(total_idle_time);
+define_genpd_debugfs_fops(devices);
static int __init pm_genpd_debug_init(void)
{
struct dentry *d;
+ struct generic_pm_domain *genpd;
pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
@@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+ pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ if (!d)
+ return -ENOMEM;
+
+ debugfs_create_file("current_state", 0444,
+ d, genpd, &genpd_status_fops);
+ debugfs_create_file("sub_domains", 0444,
+ d, genpd, &genpd_sub_domains_fops);
+ debugfs_create_file("idle_states", 0444,
+ d, genpd, &genpd_idle_states_fops);
+ debugfs_create_file("active_time", 0444,
+ d, genpd, &genpd_active_time_fops);
+ debugfs_create_file("total_idle_time", 0444,
+ d, genpd, &genpd_total_idle_time_fops);
+ debugfs_create_file("devices", 0444,
+ d, genpd, &genpd_devices_fops);
+ }
+
return 0;
}
late_initcall(pm_genpd_debug_init);
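
The open/fops macro pairs above all expand to the standard single_open()
debugfs pattern. Written out by hand for one hypothetical attribute "foo" of a
hypothetical struct my_obj, the shape is:

	struct my_obj { int value; };

	static int foo_show(struct seq_file *s, void *data)
	{
		struct my_obj *obj = s->private; /* data passed to debugfs_create_file() */

		seq_printf(s, "%d\n", obj->value);
		return 0;
	}

	static int foo_open(struct inode *inode, struct file *file)
	{
		/* single_open() stashes inode->i_private in s->private */
		return single_open(file, foo_show, inode->i_private);
	}

	static const struct file_operations foo_fops = {
		.open		= foo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

A file is then wired up with debugfs_create_file("foo", 0444, parent, obj,
&foo_fops), exactly as the per-domain files are created in
pm_genpd_debug_init() above.
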
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c99f8730de82..ea1732ed7a9d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
dev_name(dev), pm_verb(state.event), info, error);
}
-#ifdef CONFIG_PM_DEBUG
-static void dpm_show_time(ktime_t starttime, pm_message_t state,
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
@@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
usecs = usecs64;
if (usecs == 0)
usecs = 1;
- pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
- info ?: "", info ? " " : "", pm_verb(state.event),
- usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+
+ pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ error ? "aborted" : "complete",
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
-#else
-static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
- const char *info) {}
-#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
@@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
-/**
- * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
- * enable device drivers to receive interrupts.
- */
-void dpm_resume_noirq(pm_message_t state)
+void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "noirq");
+ dpm_show_time(starttime, state, 0, "noirq");
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+void dpm_noirq_end(void)
+{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
- trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+ dpm_noirq_resume_devices(state);
+ dpm_noirq_end();
}
/**
@@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "early");
+ dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, NULL);
+ dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
@@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
-/**
- * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Prevent device drivers from receiving interrupts and call the "noirq" suspend
- * handlers for all non-sysdev devices.
- */
-int dpm_suspend_noirq(pm_message_t state)
+void dpm_noirq_begin(void)
+{
+ cpuidle_pause();
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+}
+
+int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state)
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- dpm_resume_noirq(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "noirq");
}
+ dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+ int ret;
+
+ dpm_noirq_begin();
+ ret = dpm_noirq_suspend_devices(state);
+ if (ret)
+ dpm_resume_noirq(resume_event(state));
+
+ return ret;
+}
+
+/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "late");
}
+ dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
- } else
- dpm_show_time(starttime, state, NULL);
+ }
+ dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
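
Splitting the begin/end work and the per-device phases apart lets a platform
sequence reuse the pieces directly. Illustrative only; my_enter_deep_idle() is
a placeholder, not an interface added by this patch:

	static int my_enter_deep_idle(void)
	{
		int error;

		dpm_noirq_begin();		/* device IRQs disabled from here on */
		error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
		if (error) {
			/* resumes the devices and re-enables device IRQs */
			dpm_resume_noirq(PMSG_RESUME);
			return error;
		}

		/* ... enter the platform low-power state ... */

		dpm_noirq_resume_devices(PMSG_RESUME);
		dpm_noirq_end();		/* device IRQs enabled again */
		return 0;
	}
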
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 57eec1ca0569..0b718886479b 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+/*
+ * Returns opp descriptor node for a device node, caller must
+ * do of_node_put().
+ */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
{
/*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
- return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+ return of_parse_phandle(np, "operating-points-v2", 0);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return _opp_of_get_opp_desc_node(dev->of_node);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
@@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
+		/*
+		 * OPPs may get registered dynamically, so don't print an
+		 * error message here.
+		 */
+ pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
/* Free all other OPPs */
dev_pm_opp_of_cpumask_remove_table(cpumask);
@@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
+ struct device_node *np, *tmp_np, *cpu_np;
int cpu, ret = 0;
/* Get OPP descriptor node */
@@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ cpu_np = of_get_cpu_node(cpu, NULL);
+ if (!cpu_np) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
- ret = -ENODEV;
+ ret = -ENOENT;
goto put_cpu_node;
}
/* Get OPP descriptor node */
- tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
+ tmp_np = _opp_of_get_opp_desc_node(cpu_np);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
- __func__);
+ pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
goto put_cpu_node;
}
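
The helper boils down to a single of_parse_phandle() lookup on whatever device
node it is handed. A hedged sketch of the equivalent open-coded lookup on a CPU
node; my_show_opp_table() is illustrative:

	static void my_show_opp_table(struct device_node *cpu_np)
	{
		struct device_node *opp_np;

		/* "operating-points-v2" holds exactly one phandle */
		opp_np = of_parse_phandle(cpu_np, "operating-points-v2", 0);
		if (!opp_np)
			return;

		pr_info("OPP table: %pOF\n", opp_np);
		of_node_put(opp_np);	/* drop the reference taken above */
	}
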
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 144e6d8fafc8..cdd6f256da59 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
if (!!dev->power.can_wakeup == !!capable)
return;
+ dev->power.can_wakeup = capable;
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
- if (wakeup_sysfs_add(dev))
- return;
+ int ret = wakeup_sysfs_add(dev);
+
+ if (ret)
+ dev_info(dev, "Wakeup sysfs attributes not added\n");
} else {
wakeup_sysfs_remove(dev);
}
}
- dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
@@ -863,7 +865,7 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
- freeze_wake();
+ s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index edf02c1b5845..d0b65bbe7e15 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -25,19 +25,25 @@ struct property_set {
const struct property_entry *properties;
};
-static inline bool is_pset_node(struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
-}
+static const struct fwnode_operations pset_fwnode_ops;
-static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
+static inline bool is_pset_node(const struct fwnode_handle *fwnode)
{
- return is_pset_node(fwnode) ?
- container_of(fwnode, struct property_set, fwnode) : NULL;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
}
-static const struct property_entry *pset_prop_get(struct property_set *pset,
- const char *name)
+#define to_pset_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \
+ \
+ is_pset_node(__to_pset_node_fwnode) ? \
+ container_of(__to_pset_node_fwnode, \
+ struct property_set, fwnode) : \
+ NULL; \
+ })
+
+static const struct property_entry *
+pset_prop_get(const struct property_set *pset, const char *name)
{
const struct property_entry *prop;
@@ -51,7 +57,7 @@ static const struct property_entry *pset_prop_get(struct property_set *pset,
return NULL;
}
-static const void *pset_prop_find(struct property_set *pset,
+static const void *pset_prop_find(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -71,7 +77,7 @@ static const void *pset_prop_find(struct property_set *pset,
return pointer;
}
-static int pset_prop_read_u8_array(struct property_set *pset,
+static int pset_prop_read_u8_array(const struct property_set *pset,
const char *propname,
u8 *values, size_t nval)
{
@@ -86,7 +92,7 @@ static int pset_prop_read_u8_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u16_array(struct property_set *pset,
+static int pset_prop_read_u16_array(const struct property_set *pset,
const char *propname,
u16 *values, size_t nval)
{
@@ -101,7 +107,7 @@ static int pset_prop_read_u16_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u32_array(struct property_set *pset,
+static int pset_prop_read_u32_array(const struct property_set *pset,
const char *propname,
u32 *values, size_t nval)
{
@@ -116,7 +122,7 @@ static int pset_prop_read_u32_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u64_array(struct property_set *pset,
+static int pset_prop_read_u64_array(const struct property_set *pset,
const char *propname,
u64 *values, size_t nval)
{
@@ -131,7 +137,7 @@ static int pset_prop_read_u64_array(struct property_set *pset,
return 0;
}
-static int pset_prop_count_elems_of_size(struct property_set *pset,
+static int pset_prop_count_elems_of_size(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -143,7 +149,7 @@ static int pset_prop_count_elems_of_size(struct property_set *pset,
return prop->length / length;
}
-static int pset_prop_read_string_array(struct property_set *pset,
+static int pset_prop_read_string_array(const struct property_set *pset,
const char *propname,
const char **strings, size_t nval)
{
@@ -187,18 +193,18 @@ struct fwnode_handle *dev_fwnode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_fwnode);
-static bool pset_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !!pset_prop_get(to_pset_node(fwnode), propname);
}
-static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
+static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
- struct property_set *node = to_pset_node(fwnode);
+ const struct property_set *node = to_pset_node(fwnode);
if (!val)
return pset_prop_count_elems_of_size(node, propname, elem_size);
@@ -217,9 +223,10 @@ static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
return -ENXIO;
}
-static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
{
return pset_prop_read_string_array(to_pset_node(fwnode), propname,
val, nval);
@@ -249,7 +256,8 @@ EXPORT_SYMBOL_GPL(device_property_present);
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*/
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
{
bool ret;
@@ -431,7 +439,7 @@ int device_property_match_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_match_string);
-static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
+static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
@@ -467,7 +475,7 @@ static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
@@ -493,7 +501,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
@@ -519,7 +527,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
@@ -545,7 +553,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
@@ -571,7 +579,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
@@ -603,7 +611,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
@@ -627,7 +635,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
@@ -657,6 +665,34 @@ out:
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+/**
+ * fwnode_property_get_reference_args() - Find a reference with arguments
+ * @fwnode: Firmware node where to look for the reference
+ * @prop: The name of the property
+ * @nargs_prop: The name of the property telling the number of
+ * arguments in the referred node. Pass NULL if @nargs is known;
+ * when non-NULL, @nargs is ignored. Only relevant on OF.
+ * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
+ * @index: Index of the reference, from zero onwards.
+ * @args: Result structure with reference and integer arguments.
+ *
+ * Obtain a reference based on a named property in an fwnode, with
+ * integer arguments.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the
+ * returned args->fwnode pointer.
+ */
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+
static int property_copy_string_array(struct property_entry *dst,
const struct property_entry *src)
{
@@ -900,7 +936,6 @@ int device_add_properties(struct device *dev,
if (IS_ERR(p))
return PTR_ERR(p);
- p->fwnode.type = FWNODE_PDATA;
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
return 0;
@@ -935,7 +970,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
* Return parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_parent);
}
@@ -946,8 +981,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent);
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*/
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+struct fwnode_handle *
+fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
}
@@ -978,8 +1014,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
*/
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
- const char *childname)
+struct fwnode_handle *
+fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
{
return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
}
@@ -1025,7 +1062,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*/
-bool fwnode_device_is_available(struct fwnode_handle *fwnode)
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
return fwnode_call_bool_op(fwnode, device_is_available);
}
@@ -1163,7 +1200,7 @@ EXPORT_SYMBOL(device_get_mac_address);
* are available.
*/
struct fwnode_handle *
-fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
@@ -1177,7 +1214,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *endpoint)
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
{
struct fwnode_handle *port, *parent;
@@ -1197,7 +1234,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
* Extracts firmware node of a remote device the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint, *parent;
@@ -1216,7 +1253,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
*
* Extracts firmware node of a remote port the @fwnode points to.
*/
-struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode)
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
{
return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
}
@@ -1229,7 +1267,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
* Extracts firmware node of a remote endpoint the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
}
@@ -1244,8 +1282,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
* Return: Remote fwnode handle associated with remote endpoint node linked
* to @node. Use fwnode_node_put() on it when done.
*/
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port_id, u32 endpoint_id)
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
+ u32 endpoint_id)
{
struct fwnode_handle *endpoint = NULL;
@@ -1281,7 +1320,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
* information in @endpoint. The caller must hold a reference to
* @fwnode.
*/
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
memset(endpoint, 0, sizeof(*endpoint));
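
A hedged usage sketch for the new fwnode_property_get_reference_args() helper
above; the property names "io-channels" and "#io-channel-cells" are
illustrative, not something this patch defines:

	static int my_get_channel(struct fwnode_handle *fwnode)
	{
		struct fwnode_reference_args args;
		int ret;

		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells",
							 0, 0, &args);
		if (ret)
			return ret;

		/* ... use args.fwnode and args.args[0..args.nargs - 1] ... */

		fwnode_handle_put(args.fwnode);	/* the caller owns this reference */
		return 0;
	}
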
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index d6ec1c546f5b..d936fcf9f1fb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = {
NULL
};
-static struct attribute_group topology_attr_group = {
+static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.name = "topology"
};
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index b5c48a8d485f..54f81c554815 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -3,11 +3,8 @@ config BCMA_POSSIBLE
depends on HAS_IOMEM && HAS_DMA
default y
-menu "Broadcom specific AMBA"
- depends on BCMA_POSSIBLE
-
-config BCMA
- tristate "BCMA support"
+menuconfig BCMA
+ tristate "Broadcom specific AMBA"
depends on BCMA_POSSIBLE
help
Bus driver for Broadcom specific Advanced Microcontroller Bus
@@ -117,5 +114,3 @@ config BCMA_DEBUG
This turns on additional debugging messages.
If unsure, say N
-
-endmenu
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 7bde8d7a2816..982d5781d3ce 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM53573:
case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 245a879b036e..255591ab3716 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1678,9 +1678,12 @@ static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
Enquiry2->FirmwareID.FirmwareType = '0';
Enquiry2->FirmwareID.TurnID = 0;
}
- sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d",
- Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion,
- Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID);
+ snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion),
+ "%d.%02d-%c-%02d",
+ Enquiry2->FirmwareID.MajorVersion,
+ Enquiry2->FirmwareID.MinorVersion,
+ Enquiry2->FirmwareID.FirmwareType,
+ Enquiry2->FirmwareID.TurnID);
if (!((Controller->FirmwareVersion[0] == '5' &&
strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
(Controller->FirmwareVersion[0] == '4' &&
@@ -6588,7 +6591,8 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
&dac960_proc_fops);
}
- sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+ snprintf(Controller->ControllerName, sizeof(Controller->ControllerName),
+ "c%d", Controller->ControllerNumber);
ControllerProcEntry = proc_mkdir(Controller->ControllerName,
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 8ddc98279c8f..4a438b8abe27 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
+ depends on CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -111,33 +112,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_CPQ_CISS_DA
- tristate "Compaq Smart Array 5xxx support"
- depends on PCI
- select CHECK_SIGNATURE
- select BLK_SCSI_REQUEST
- help
- This is the driver for Compaq Smart Array 5xxx controllers.
- Everyone using these boards should say Y here.
- See <file:Documentation/blockdev/cciss.txt> for the current list of
- boards supported by this driver, and for further information
- on the use of this driver.
-
-config CISS_SCSI_TAPE
- bool "SCSI tape drive support for Smart Array 5xxx"
- depends on BLK_CPQ_CISS_DA && PROC_FS
- depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
- help
- When enabled (Y), this option allows SCSI tape drives and SCSI medium
- changers (tape robots) to be accessed via a Compaq 5xxx array
- controller. (See <file:Documentation/blockdev/cciss.txt> for more details.)
-
- "SCSI support" and "SCSI tape support" must also be enabled for this
- option to work.
-
- When this option is disabled (N), the SCSI portion of the driver
- is not compiled.
-
config BLK_DEV_DAC960
tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
depends on PCI
@@ -470,7 +444,7 @@ config VIRTIO_BLK
depends on VIRTIO
---help---
This is the virtual block driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ QEMU based VMMs (like KVM or Xen). Say Y or M.
config VIRTIO_BLK_SCSI
bool "SCSI passthrough request for the Virtio block driver"
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ec8c36897b75..1f456d86a190 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 104b71c0490d..bbd0d186cfc0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -294,14 +294,13 @@ out:
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct brd_device *brd = bdev->bd_disk->private_data;
+ struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
bio_for_each_segment(bvec, bio, iter) {
@@ -326,7 +325,11 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ int err;
+
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
page_endio(page, is_write, err);
return err;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
deleted file mode 100644
index 678af946be30..000000000000
--- a/drivers/block/cciss.c
+++ /dev/null
@@ -1,5415 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers.
- * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/bio.h>
-#include <linux/blkpg.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/hdreg.h>
-#include <linux/spinlock.h>
-#include <linux/compat.h>
-#include <linux/mutex.h>
-#include <linux/bitmap.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/completion.h>
-#include <scsi/scsi.h>
-#include <scsi/sg.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_request.h>
-#include <linux/cdrom.h>
-#include <linux/scatterlist.h>
-#include <linux/kthread.h>
-
-#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
-
-/* Embedded module documentation macros - see modules.h */
-MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
-MODULE_VERSION("3.6.26");
-MODULE_LICENSE("GPL");
-static int cciss_tape_cmds = 6;
-module_param(cciss_tape_cmds, int, 0644);
-MODULE_PARM_DESC(cciss_tape_cmds,
- "number of commands to allocate for tape devices (default: 6)");
-static int cciss_simple_mode;
-module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_simple_mode,
- "Use 'simple mode' rather than 'performant mode'");
-
-static int cciss_allow_hpsa;
-module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_allow_hpsa,
- "Prevent cciss driver from accessing hardware known to be "
- " supported by the hpsa driver");
-
-static DEFINE_MUTEX(cciss_mutex);
-static struct proc_dir_entry *proc_cciss;
-
-#include "cciss_cmd.h"
-#include "cciss.h"
-#include <linux/cciss_ioctl.h>
-
-/* define the PCI info for the cards we can control */
-static const struct pci_device_id cciss_pci_device_id[] = {
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
-
-/* board_id = Subsystem Device ID & Vendor ID
- * product = Marketing Name for the board
- * access = Address of the struct of function pointers
- */
-static struct board_type products[] = {
- {0x40700E11, "Smart Array 5300", &SA5_access},
- {0x40800E11, "Smart Array 5i", &SA5B_access},
- {0x40820E11, "Smart Array 532", &SA5B_access},
- {0x40830E11, "Smart Array 5312", &SA5B_access},
- {0x409A0E11, "Smart Array 641", &SA5_access},
- {0x409B0E11, "Smart Array 642", &SA5_access},
- {0x409C0E11, "Smart Array 6400", &SA5_access},
- {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
- {0x40910E11, "Smart Array 6i", &SA5_access},
- {0x3225103C, "Smart Array P600", &SA5_access},
- {0x3223103C, "Smart Array P800", &SA5_access},
- {0x3234103C, "Smart Array P400", &SA5_access},
- {0x3235103C, "Smart Array P400i", &SA5_access},
- {0x3211103C, "Smart Array E200i", &SA5_access},
- {0x3212103C, "Smart Array E200", &SA5_access},
- {0x3213103C, "Smart Array E200i", &SA5_access},
- {0x3214103C, "Smart Array E200i", &SA5_access},
- {0x3215103C, "Smart Array E200i", &SA5_access},
- {0x3237103C, "Smart Array E500", &SA5_access},
- {0x323D103C, "Smart Array P700m", &SA5_access},
-};
-
-/* How long to wait (in milliseconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 30000
-#define MAX_IOCTL_CONFIG_WAIT 1000
-
-/*define how many times we will try a command because of bus resets */
-#define MAX_CMD_RETRIES 3
-
-#define MAX_CTLR 32
-
-/* Originally cciss driver only supports 8 major numbers */
-#define MAX_CTLR_ORIG 8
-
-static ctlr_info_t *hba[MAX_CTLR];
-
-static struct task_struct *cciss_scan_thread;
-static DEFINE_MUTEX(scan_mutex);
-static LIST_HEAD(scan_q);
-
-static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intx(int irq, void *dev_id);
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
-static int cciss_open(struct block_device *bdev, fmode_t mode);
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static int cciss_revalidate(struct gendisk *disk);
-static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
-static int deregister_disk(ctlr_info_t *h, int drv_index,
- int clear_all, int via_ioctl);
-
-static void cciss_read_capacity(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
- sector_t total_size,
- unsigned int block_size, InquiryData_struct *inq_buff,
- drive_info_struct *drv);
-static void cciss_interrupt_mode(ctlr_info_t *);
-static int cciss_enter_simple_mode(struct ctlr_info *h);
-static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
- __u8 page_code, unsigned char scsi3addr[],
- int cmd_type);
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
- int attempt_retry);
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
-
-static int add_to_scan_list(struct ctlr_info *h);
-static int scan_thread(void *data);
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
-static void cciss_hba_release(struct device *dev);
-static void cciss_device_release(struct device *dev);
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
-static inline u32 next_command(ctlr_info_t *h);
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
- u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);
-
-/* performant mode helper functions */
-static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
- int *bucket_map);
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
-
-#ifdef CONFIG_PROC_FS
-static void cciss_procinit(ctlr_info_t *h);
-#else
-static void cciss_procinit(ctlr_info_t *h)
-{
-}
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_COMPAT
-static int cciss_compat_ioctl(struct block_device *, fmode_t,
- unsigned, unsigned long);
-#endif
-
-static const struct block_device_operations cciss_fops = {
- .owner = THIS_MODULE,
- .open = cciss_unlocked_open,
- .release = cciss_release,
- .ioctl = cciss_ioctl,
- .getgeo = cciss_getgeo,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = cciss_compat_ioctl,
-#endif
- .revalidate_disk = cciss_revalidate,
-};
-
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
-{
- if (likely(h->transMethod & CFGTBL_Trans_Performant))
- c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
-
-/*
- * Enqueuing and dequeuing functions for cmdlists.
- */
-static inline void addQ(struct list_head *list, CommandList_struct *c)
-{
- list_add_tail(&c->list, list);
-}
-
-static inline void removeQ(CommandList_struct *c)
-{
- /*
- * After kexec/dump some commands might still
- * be in flight, which the firmware will try
- * to complete. Resetting the firmware doesn't work
- * with old fw revisions, so we have to mark
- * them off as 'stale' to prevent the driver from
- * falling over.
- */
- if (WARN_ON(list_empty(&c->list))) {
- c->cmd_type = CMD_MSG_STALE;
- return;
- }
-
- list_del_init(&c->list);
-}
-
-static void enqueue_cmd_and_start_io(ctlr_info_t *h,
- CommandList_struct *c)
-{
- unsigned long flags;
- set_performant_mode(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
- start_io(h);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
- int nr_cmds)
-{
- int i;
-
- if (!cmd_sg_list)
- return;
- for (i = 0; i < nr_cmds; i++) {
- kfree(cmd_sg_list[i]);
- cmd_sg_list[i] = NULL;
- }
- kfree(cmd_sg_list);
-}
-
-static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
- ctlr_info_t *h, int chainsize, int nr_cmds)
-{
- int j;
- SGDescriptor_struct **cmd_sg_list;
-
- if (chainsize <= 0)
- return NULL;
-
- cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
- if (!cmd_sg_list)
- return NULL;
-
- /* Build up chain blocks for each command */
- for (j = 0; j < nr_cmds; j++) {
- /* Need a block of chainsized s/g elements. */
- cmd_sg_list[j] = kmalloc((chainsize *
- sizeof(*cmd_sg_list[j])), GFP_KERNEL);
- if (!cmd_sg_list[j]) {
- dev_err(&h->pdev->dev, "Cannot get memory "
- "for s/g chains.\n");
- goto clean;
- }
- }
- return cmd_sg_list;
-clean:
- cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
- return NULL;
-}
-
-static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
-{
- SGDescriptor_struct *chain_sg;
- u64bit temp64;
-
- if (c->Header.SGTotal <= h->max_cmd_sgentries)
- return;
-
- chain_sg = &c->SG[h->max_cmd_sgentries - 1];
- temp64.val32.lower = chain_sg->Addr.lower;
- temp64.val32.upper = chain_sg->Addr.upper;
- pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
-}
-
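-/* Hook the chain block onto the last embedded s/g entry: flag it with
- * CCISS_SG_CHAIN, map the block for DMA, and record its bus address.
- * Returns 0 on success, -1 if the DMA mapping fails.
- */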
-static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
- SGDescriptor_struct *chain_block, int len)
-{
- SGDescriptor_struct *chain_sg;
- u64bit temp64;
-
- chain_sg = &c->SG[h->max_cmd_sgentries - 1];
- chain_sg->Ext = CCISS_SG_CHAIN;
- chain_sg->Len = len;
- temp64.val = pci_map_single(h->pdev, chain_block, len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- dev_warn(&h->pdev->dev,
- "%s: error mapping chain block for DMA\n",
- __func__);
- return -1;
- }
- chain_sg->Addr.lower = temp64.val32.lower;
- chain_sg->Addr.upper = temp64.val32.upper;
-
- return 0;
-}
-
-#include "cciss_scsi.c" /* For SCSI tape support */
-
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
- "UNKNOWN"
-};
-#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)
-
-#ifdef CONFIG_PROC_FS
-
-/*
- * Report information about this controller.
- */
-#define ENG_GIG 1000000000
-#define ENG_GIG_FACTOR (ENG_GIG/512)
-#define ENGAGE_SCSI "engage scsi"
-
-static void cciss_seq_show_header(struct seq_file *seq)
-{
- ctlr_info_t *h = seq->private;
-
- seq_printf(seq, "%s: HP %s Controller\n"
- "Board ID: 0x%08lx\n"
- "Firmware Version: %c%c%c%c\n"
- "IRQ: %d\n"
- "Logical drives: %d\n"
- "Current Q depth: %d\n"
- "Current # commands on controller: %d\n"
- "Max Q depth since init: %d\n"
- "Max # commands on controller since init: %d\n"
- "Max SG entries since init: %d\n",
- h->devname,
- h->product_name,
- (unsigned long)h->board_id,
- h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
- h->num_luns,
- h->Qdepth, h->commands_outstanding,
- h->maxQsinceinit, h->max_outstanding, h->maxSG);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_seq_tape_report(seq, h);
-#endif /* CONFIG_CISS_SCSI_TAPE */
-}
-
-static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
-{
- ctlr_info_t *h = seq->private;
- unsigned long flags;
-
- /* prevent displaying bogus info during configuration
- * or deconfiguration of a logical volume
- */
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return ERR_PTR(-EBUSY);
- }
- h->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (*pos == 0)
- cciss_seq_show_header(seq);
-
- return pos;
-}
-
-static int cciss_seq_show(struct seq_file *seq, void *v)
-{
- sector_t vol_sz, vol_sz_frac;
- ctlr_info_t *h = seq->private;
- unsigned ctlr = h->ctlr;
- loff_t *pos = v;
- drive_info_struct *drv = h->drv[*pos];
-
- if (*pos > h->highest_lun)
- return 0;
-
- if (drv == NULL) /* it's possible for h->drv[] to have holes. */
- return 0;
-
- if (drv->heads == 0)
- return 0;
-
- vol_sz = drv->nr_blocks;
- vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
- vol_sz_frac *= 100;
- sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-
- if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
- drv->raid_level = RAID_UNKNOWN;
- seq_printf(seq, "cciss/c%dd%d:"
- "\t%4u.%02uGB\tRAID %s\n",
- ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
- raid_label[drv->raid_level]);
- return 0;
-}
-
-static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- ctlr_info_t *h = seq->private;
-
- if (*pos > h->highest_lun)
- return NULL;
- *pos += 1;
-
- return pos;
-}
-
-static void cciss_seq_stop(struct seq_file *seq, void *v)
-{
- ctlr_info_t *h = seq->private;
-
- /* Only reset h->busy_configuring if we succeeded in setting
- * it during cciss_seq_start. */
- if (v == ERR_PTR(-EBUSY))
- return;
-
- h->busy_configuring = 0;
-}
-
-static const struct seq_operations cciss_seq_ops = {
- .start = cciss_seq_start,
- .show = cciss_seq_show,
- .next = cciss_seq_next,
- .stop = cciss_seq_stop,
-};
-
-static int cciss_seq_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &cciss_seq_ops);
- struct seq_file *seq = file->private_data;
-
- if (!ret)
- seq->private = PDE_DATA(inode);
-
- return ret;
-}
-
-static ssize_t
-cciss_proc_write(struct file *file, const char __user *buf,
- size_t length, loff_t *ppos)
-{
- int err;
- char *buffer;
-
-#ifndef CONFIG_CISS_SCSI_TAPE
- return -EINVAL;
-#endif
-
- if (!buf || length > PAGE_SIZE - 1)
- return -EINVAL;
-
- buffer = memdup_user_nul(buf, length);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
- struct seq_file *seq = file->private_data;
- ctlr_info_t *h = seq->private;
-
- err = cciss_engage_scsi(h);
- if (err == 0)
- err = length;
- } else
-#endif /* CONFIG_CISS_SCSI_TAPE */
- err = -EINVAL;
- /* might be nice to have "disengage" too, but it's not
- * safely possible. (only 1 module use count, lock issues.)
- */
-
- kfree(buffer);
- return err;
-}
-
-static const struct file_operations cciss_proc_fops = {
- .owner = THIS_MODULE,
- .open = cciss_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .write = cciss_proc_write,
-};
-
-static void cciss_procinit(ctlr_info_t *h)
-{
- struct proc_dir_entry *pde;
-
- if (proc_cciss == NULL)
- proc_cciss = proc_mkdir("driver/cciss", NULL);
- if (!proc_cciss)
- return;
- pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
- S_IROTH, proc_cciss,
- &cciss_proc_fops, h);
-}
-#endif /* CONFIG_PROC_FS */
-
-#define MAX_PRODUCT_NAME_LEN 19
-
-#define to_hba(n) container_of(n, struct ctlr_info, dev)
-#define to_drv(n) container_of(n, drive_info_struct, dev)
-
-/* List of controllers which cannot be hard reset on kexec with reset_devices */
-static u32 unresettable_controller[] = {
- 0x3223103C, /* Smart Array P800 */
- 0x3234103C, /* Smart Array P400 */
- 0x3235103C, /* Smart Array P400i */
- 0x3211103C, /* Smart Array E200i */
- 0x3212103C, /* Smart Array E200 */
- 0x3213103C, /* Smart Array E200i */
- 0x3214103C, /* Smart Array E200i */
- 0x3215103C, /* Smart Array E200i */
- 0x3237103C, /* Smart Array E500 */
- 0x323D103C, /* Smart Array P700m */
- 0x40800E11, /* Smart Array 5i */
- 0x409C0E11, /* Smart Array 6400 */
- 0x409D0E11, /* Smart Array 6400 EM */
- 0x40700E11, /* Smart Array 5300 */
- 0x40820E11, /* Smart Array 532 */
- 0x40830E11, /* Smart Array 5312 */
- 0x409A0E11, /* Smart Array 641 */
- 0x409B0E11, /* Smart Array 642 */
- 0x40910E11, /* Smart Array 6i */
-};
-
-/* List of controllers which cannot even be soft reset */
-static u32 soft_unresettable_controller[] = {
- 0x40800E11, /* Smart Array 5i */
- 0x40700E11, /* Smart Array 5300 */
- 0x40820E11, /* Smart Array 532 */
- 0x40830E11, /* Smart Array 5312 */
- 0x409A0E11, /* Smart Array 641 */
- 0x409B0E11, /* Smart Array 642 */
- 0x40910E11, /* Smart Array 6i */
- /* Exclude 640x boards. These are two pci devices in one slot
- * which share a battery backed cache module. One controls the
- * cache, the other accesses the cache through the one that controls
- * it. If we reset the one controlling the cache, the other will
- * likely not be happy. Just forbid resetting this conjoined mess.
- */
- 0x409C0E11, /* Smart Array 6400 */
- 0x409D0E11, /* Smart Array 6400 EM */
-};
-
-static int ctlr_is_hard_resettable(u32 board_id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
- if (unresettable_controller[i] == board_id)
- return 0;
- return 1;
-}
-
-static int ctlr_is_soft_resettable(u32 board_id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
- if (soft_unresettable_controller[i] == board_id)
- return 0;
- return 1;
-}
-
-static int ctlr_is_resettable(u32 board_id)
-{
- return ctlr_is_hard_resettable(board_id) ||
- ctlr_is_soft_resettable(board_id);
-}
-
-static ssize_t host_show_resettable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ctlr_info *h = to_hba(dev);
-
- return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
-}
-static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
-
-static ssize_t host_store_rescan(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ctlr_info *h = to_hba(dev);
-
- add_to_scan_list(h);
- wake_up_process(cciss_scan_thread);
- wait_for_completion_interruptible(&h->scan_wait);
-
- return count;
-}
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-
-static ssize_t host_show_transport_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ctlr_info *h = to_hba(dev);
-
- return snprintf(buf, 20, "%s\n",
- h->transMethod & CFGTBL_Trans_Performant ?
- "performant" : "simple");
-}
-static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
-
-static ssize_t dev_show_unique_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- __u8 sn[16];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(sn, drv->serial_no, sizeof(sn));
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, 16 * 2 + 2,
- "%02X%02X%02X%02X%02X%02X%02X%02X"
- "%02X%02X%02X%02X%02X%02X%02X%02X\n",
- sn[0], sn[1], sn[2], sn[3],
- sn[4], sn[5], sn[6], sn[7],
- sn[8], sn[9], sn[10], sn[11],
- sn[12], sn[13], sn[14], sn[15]);
-}
-static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
-
-static ssize_t dev_show_vendor(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char vendor[VENDOR_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(vendor) + 1, "%s\n", vendor);
-}
-static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
-
-static ssize_t dev_show_model(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char model[MODEL_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(model, drv->model, MODEL_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(model) + 1, "%s\n", model);
-}
-static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
-
-static ssize_t dev_show_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char rev[REV_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(rev, drv->rev, REV_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
-}
-static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
-
-static ssize_t cciss_show_lunid(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- unsigned long flags;
- unsigned char lunid[8];
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- if (!drv->heads) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -ENOTTY;
- }
- memcpy(lunid, drv->LunID, sizeof(lunid));
- spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
-}
-static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
-
-static ssize_t cciss_show_raid_level(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- int raid;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- raid = drv->raid_level;
- spin_unlock_irqrestore(&h->lock, flags);
- if (raid < 0 || raid > RAID_UNKNOWN)
- raid = RAID_UNKNOWN;
-
- return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
- raid_label[raid]);
-}
-static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
-
-static ssize_t cciss_show_usage_count(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- unsigned long flags;
- int count;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- count = drv->usage_count;
- spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", count);
-}
-static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
-
-static struct attribute *cciss_host_attrs[] = {
- &dev_attr_rescan.attr,
- &dev_attr_resettable.attr,
- &dev_attr_transport_mode.attr,
- NULL
-};
-
-static struct attribute_group cciss_host_attr_group = {
- .attrs = cciss_host_attrs,
-};
-
-static const struct attribute_group *cciss_host_attr_groups[] = {
- &cciss_host_attr_group,
- NULL
-};
-
-static struct device_type cciss_host_type = {
- .name = "cciss_host",
- .groups = cciss_host_attr_groups,
- .release = cciss_hba_release,
-};
-
-static struct attribute *cciss_dev_attrs[] = {
- &dev_attr_unique_id.attr,
- &dev_attr_model.attr,
- &dev_attr_vendor.attr,
- &dev_attr_rev.attr,
- &dev_attr_lunid.attr,
- &dev_attr_raid_level.attr,
- &dev_attr_usage_count.attr,
- NULL
-};
-
-static struct attribute_group cciss_dev_attr_group = {
- .attrs = cciss_dev_attrs,
-};
-
-static const struct attribute_group *cciss_dev_attr_groups[] = {
- &cciss_dev_attr_group,
- NULL
-};
-
-static struct device_type cciss_dev_type = {
- .name = "cciss_device",
- .groups = cciss_dev_attr_groups,
- .release = cciss_device_release,
-};
-
-static struct bus_type cciss_bus_type = {
- .name = "cciss",
-};
-
-/*
- * cciss_hba_release is called when the reference count
- * of h->dev goes to zero.
- */
-static void cciss_hba_release(struct device *dev)
-{
- /*
- * nothing to do, but need this to avoid a warning
- * about not having a release handler from lib/kref.c.
- */
-}
-
-/*
- * Initialize sysfs entry for each controller. This sets up and registers
- * the 'cciss#' directory for each individual controller under
- * /sys/bus/pci/devices/<dev>/.
- */
-static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
-{
- device_initialize(&h->dev);
- h->dev.type = &cciss_host_type;
- h->dev.bus = &cciss_bus_type;
- dev_set_name(&h->dev, "%s", h->devname);
- h->dev.parent = &h->pdev->dev;
-
- return device_add(&h->dev);
-}
-
-/*
- * Remove sysfs entries for an hba.
- */
-static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
-{
- device_del(&h->dev);
- put_device(&h->dev); /* final put. */
-}
-
-/* cciss_device_release is called when the reference count
- * of h->drv[x]->dev goes to zero.
- */
-static void cciss_device_release(struct device *dev)
-{
- drive_info_struct *drv = to_drv(dev);
- kfree(drv);
-}
-
-/*
- * Initialize sysfs for each logical drive. This sets up and registers
- * the 'c#d#' directory for each individual logical drive under
- * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
- * /sys/block/cciss!c#d# to this entry.
- */
-static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
- int drv_index)
-{
- struct device *dev;
-
- if (h->drv[drv_index]->device_initialized)
- return 0;
-
- dev = &h->drv[drv_index]->dev;
- device_initialize(dev);
- dev->type = &cciss_dev_type;
- dev->bus = &cciss_bus_type;
- dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
- dev->parent = &h->dev;
- h->drv[drv_index]->device_initialized = 1;
- return device_add(dev);
-}
-
-/*
- * Remove sysfs entries for a logical drive.
- */
-static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
- int ctlr_exiting)
-{
- struct device *dev = &h->drv[drv_index]->dev;
-
- /* special case for c*d0, we only destroy it on controller exit */
- if (drv_index == 0 && !ctlr_exiting)
- return;
-
- device_del(dev);
- put_device(dev); /* the "final" put. */
- h->drv[drv_index] = NULL;
-}
-
-/*
- * For operations that cannot sleep, a command block is allocated at init,
- * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use.
- */
-static CommandList_struct *cmd_alloc(ctlr_info_t *h)
-{
- CommandList_struct *c;
- int i;
- u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
- return NULL;
- } while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(CommandList_struct));
- cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- err_dma_handle = h->errinfo_pool_dhandle
- + i * sizeof(ErrorInfo_struct);
- h->nr_allocs++;
-
- c->cmdindex = i;
-
- INIT_LIST_HEAD(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->ctlr = h->ctlr;
- return c;
-}
-
-/* allocate a command using pci_zalloc_consistent, used for ioctls,
- * etc., not for the main i/o path.
- */
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
-{
- CommandList_struct *c;
- u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
- c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct),
- &cmd_dma_handle);
- if (c == NULL)
- return NULL;
-
- c->cmdindex = -1;
-
- c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
-
- if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(CommandList_struct), c, cmd_dma_handle);
- return NULL;
- }
-
- INIT_LIST_HEAD(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->ctlr = h->ctlr;
- return c;
-}
-
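-/* Return a command block from cmd_alloc() to the pool by clearing its
- * bit in the allocation bitmap.
- */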
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
- int i;
-
- i = c - h->cmd_pool;
- clear_bit(i, h->cmd_pool_bits);
- h->nr_frees++;
-}
-
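-/* Free a command block from cmd_special_alloc(), releasing the
- * coherent DMA allocations for both the command and its error info.
- */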
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
-{
- u64bit temp64;
-
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
- pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
- c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
- (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
-}
-
-static inline ctlr_info_t *get_host(struct gendisk *disk)
-{
- return disk->queue->queuedata;
-}
-
-static inline drive_info_struct *get_drv(struct gendisk *disk)
-{
- return disk->private_data;
-}
-
-/*
- * Open. Make sure the device is really there.
- */
-static int cciss_open(struct block_device *bdev, fmode_t mode)
-{
- ctlr_info_t *h = get_host(bdev->bd_disk);
- drive_info_struct *drv = get_drv(bdev->bd_disk);
-
- dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
- if (drv->busy_configuring)
- return -EBUSY;
- /*
- * Root is allowed to open raw volume zero even if it's not configured
- * so array config can still work. Root is also allowed to open any
- * volume that has a LUN ID, so it can issue IOCTL to reread the
- * disk information. I don't think I really like this
- * but I'm already using way too many device nodes to claim another one
- * for "raw controller".
- */
- if (drv->heads == 0) {
- if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
- /* if not node 0 make sure it is a partition = 0 */
- if (MINOR(bdev->bd_dev) & 0x0f) {
- return -ENXIO;
- /* if it is, make sure we have a LUN ID */
- } else if (memcmp(drv->LunID, CTLR_LUNID,
- sizeof(drv->LunID))) {
- return -ENXIO;
- }
- }
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- }
- drv->usage_count++;
- h->usage_count++;
- return 0;
-}
-
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&cciss_mutex);
- ret = cciss_open(bdev, mode);
- mutex_unlock(&cciss_mutex);
-
- return ret;
-}
-
-/*
- * Close. Sync first.
- */
-static void cciss_release(struct gendisk *disk, fmode_t mode)
-{
- ctlr_info_t *h;
- drive_info_struct *drv;
-
- mutex_lock(&cciss_mutex);
- h = get_host(disk);
- drv = get_drv(disk);
- dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
- drv->usage_count--;
- h->usage_count--;
- mutex_unlock(&cciss_mutex);
-}
-
-#ifdef CONFIG_COMPAT
-
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg);
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg);
-
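-/* Compat entry point: most ioctls are layout-compatible between 32-bit
- * and 64-bit and pass straight through; only the passthru commands
- * need their arguments translated.
- */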
-static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- switch (cmd) {
- case CCISS_GETPCIINFO:
- case CCISS_GETINTINFO:
- case CCISS_SETINTINFO:
- case CCISS_GETNODENAME:
- case CCISS_SETNODENAME:
- case CCISS_GETHEARTBEAT:
- case CCISS_GETBUSTYPES:
- case CCISS_GETFIRMVER:
- case CCISS_GETDRIVVER:
- case CCISS_REVALIDVOLS:
- case CCISS_DEREGDISK:
- case CCISS_REGNEWDISK:
- case CCISS_REGNEWD:
- case CCISS_RESCANDISK:
- case CCISS_GETLUNINFO:
- return cciss_ioctl(bdev, mode, cmd, arg);
-
- case CCISS_PASSTHRU32:
- return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
- case CCISS_BIG_PASSTHRU32:
- return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
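-/* Translate a 32-bit CCISS_PASSTHRU argument into the native layout in
- * compat-allocated user space, issue it via cciss_ioctl(), and copy
- * the error info back to the 32-bit caller.
- */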
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- IOCTL32_Command_struct __user *arg32 =
- (IOCTL32_Command_struct __user *) arg;
- IOCTL_Command_struct arg64;
- IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
- int err;
- u32 cp;
-
- memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |=
- copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |=
- copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |=
- copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
-
- if (err)
- return -EFAULT;
-
- err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
- err |=
- copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
-}
-
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- BIG_IOCTL32_Command_struct __user *arg32 =
- (BIG_IOCTL32_Command_struct __user *) arg;
- BIG_IOCTL_Command_struct arg64;
- BIG_IOCTL_Command_struct __user *p =
- compat_alloc_user_space(sizeof(arg64));
- int err;
- u32 cp;
-
- memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |=
- copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |=
- copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |=
- copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(arg64.malloc_size, &arg32->malloc_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
-
- if (err)
- return -EFAULT;
-
- err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
- err |=
- copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
-}
-#endif
-
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- drive_info_struct *drv = get_drv(bdev->bd_disk);
-
- if (!drv->cylinders)
- return -ENXIO;
-
- geo->heads = drv->heads;
- geo->sectors = drv->sectors;
- geo->cylinders = drv->cylinders;
- return 0;
-}
-
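-/* For a completed ioctl command with target status other than
- * CHECK CONDITION, look for a unit attention condition.
- */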
-static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
- if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
- c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
- (void)check_for_unit_attention(h, c);
-}
-
-static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_pci_info_struct pciinfo;
-
- if (!argp)
- return -EINVAL;
- pciinfo.domain = pci_domain_nr(h->pdev->bus);
- pciinfo.bus = h->pdev->bus->number;
- pciinfo.dev_fn = h->pdev->devfn;
- pciinfo.board_id = h->board_id;
- if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_coalint_struct intinfo;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
- intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user
- (argp, &intinfo, sizeof(cciss_coalint_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_coalint_struct intinfo;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
- return -EFAULT;
- if ((intinfo.delay == 0) && (intinfo.count == 0))
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- /* Update the field, and then ring the doorbell */
- writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
- writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-
- for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- udelay(1000); /* delay and try again */
- }
- spin_unlock_irqrestore(&h->lock, flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
- return 0;
-}
-
-static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
-{
- NodeName_type NodeName;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- for (i = 0; i < 16; i++)
- NodeName[i] = readb(&h->cfgtable->ServerName[i]);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
-{
- NodeName_type NodeName;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
- return -EFAULT;
- spin_lock_irqsave(&h->lock, flags);
- /* Update the field, and then ring the doorbell */
- for (i = 0; i < 16; i++)
- writeb(NodeName[i], &h->cfgtable->ServerName[i]);
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- udelay(1000); /* delay and try again */
- }
- spin_unlock_irqrestore(&h->lock, flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
- return 0;
-}
-
-static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
-{
- Heartbeat_type heartbeat;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- heartbeat = readl(&h->cfgtable->HeartBeat);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
-{
- BusTypes_type BusTypes;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- BusTypes = readl(&h->cfgtable->BusTypes);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
-{
- FirmwareVer_type firmware;
-
- if (!argp)
- return -EINVAL;
- memcpy(firmware, h->firm_ver, 4);
-
- if (copy_to_user
- (argp, firmware, sizeof(FirmwareVer_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
-{
- DriverVer_type DriverVer = DRIVER_VERSION;
-
- if (!argp)
- return -EINVAL;
- if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getluninfo(ctlr_info_t *h,
- struct gendisk *disk, void __user *argp)
-{
- LogvolInfo_struct luninfo;
- drive_info_struct *drv = get_drv(disk);
-
- if (!argp)
- return -EINVAL;
- memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
- luninfo.num_opens = drv->usage_count;
- luninfo.num_parts = 0;
- if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_passthru(ctlr_info_t *h, void __user *argp)
-{
- IOCTL_Command_struct iocommand;
- CommandList_struct *c;
- char *buff = NULL;
- u64bit temp64;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (!argp)
- return -EINVAL;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- if (copy_from_user
- (&iocommand, argp, sizeof(IOCTL_Command_struct)))
- return -EFAULT;
- if ((iocommand.buf_size < 1) &&
- (iocommand.Request.Type.Direction != XFER_NONE)) {
- return -EINVAL;
- }
- if (iocommand.buf_size > 0) {
- buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if (buff == NULL)
- return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
- }
- } else {
- memset(buff, 0, iocommand.buf_size);
- }
- c = cmd_special_alloc(h);
- if (!c) {
- kfree(buff);
- return -ENOMEM;
- }
- /* Fill in the command type */
- c->cmd_type = CMD_IOCTL_PEND;
- /* Fill in Command Header */
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- if (iocommand.buf_size > 0) { /* buffer to fill */
- c->Header.SGList = 1;
- c->Header.SGTotal = 1;
- } else { /* no buffers to fill */
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
- c->Header.LUN = iocommand.LUN_info;
- /* use the kernel address of the cmd block for the tag */
- c->Header.Tag.lower = c->busaddr;
-
- /* Fill in Request block */
- c->Request = iocommand.Request;
-
- /* Fill in the scatter gather information */
- if (iocommand.buf_size > 0) {
- temp64.val = pci_map_single(h->pdev, buff,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
- c->SG[0].Addr.lower = temp64.val32.lower;
- c->SG[0].Addr.upper = temp64.val32.upper;
- c->SG[0].Len = iocommand.buf_size;
- c->SG[0].Ext = 0; /* we are not chaining */
- }
- c->waiting = &wait;
-
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
-
- /* unlock the buffers from DMA */
- temp64.val32.lower = c->SG[0].Addr.lower;
- temp64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
- PCI_DMA_BIDIRECTIONAL);
- check_ioctl_unit_attention(h, c);
-
- /* Copy the error information out */
- iocommand.error_info = *(c->err_info);
- if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
- }
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
- if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
- }
- }
- kfree(buff);
- cmd_special_free(h, c);
- return 0;
-}
-
-static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
-{
- BIG_IOCTL_Command_struct *ioc;
- CommandList_struct *c;
- unsigned char **buff = NULL;
- int *buff_size = NULL;
- u64bit temp64;
- BYTE sg_used = 0;
- int status = 0;
- int i;
- DECLARE_COMPLETION_ONSTACK(wait);
- __u32 left;
- __u32 sz;
- BYTE __user *data_ptr;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (copy_from_user(ioc, argp, sizeof(*ioc))) {
- status = -EFAULT;
- goto cleanup1;
- }
- if ((ioc->buf_size < 1) &&
- (ioc->Request.Type.Direction != XFER_NONE)) {
- status = -EINVAL;
- goto cleanup1;
- }
- /* Check kmalloc limits using all SGs */
- if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
- status = -EINVAL;
- goto cleanup1;
- }
- if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
- status = -EINVAL;
- goto cleanup1;
- }
- buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
- if (!buff) {
- status = -ENOMEM;
- goto cleanup1;
- }
- buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
- if (!buff_size) {
- status = -ENOMEM;
- goto cleanup1;
- }
- left = ioc->buf_size;
- data_ptr = ioc->buf;
- while (left) {
- sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
- buff_size[sg_used] = sz;
- buff[sg_used] = kmalloc(sz, GFP_KERNEL);
- if (buff[sg_used] == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (ioc->Request.Type.Direction == XFER_WRITE) {
- if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -EFAULT;
- goto cleanup1;
- }
- } else {
- memset(buff[sg_used], 0, sz);
- }
- left -= sz;
- data_ptr += sz;
- sg_used++;
- }
- c = cmd_special_alloc(h);
- if (!c) {
- status = -ENOMEM;
- goto cleanup1;
- }
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- c->Header.LUN = ioc->LUN_info;
- c->Header.Tag.lower = c->busaddr;
-
- c->Request = ioc->Request;
- for (i = 0; i < sg_used; i++) {
- temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
- PCI_DMA_BIDIRECTIONAL);
- c->SG[i].Addr.lower = temp64.val32.lower;
- c->SG[i].Addr.upper = temp64.val32.upper;
- c->SG[i].Len = buff_size[i];
- c->SG[i].Ext = 0; /* we are not chaining */
- }
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
- /* unlock the buffers from DMA */
- for (i = 0; i < sg_used; i++) {
- temp64.val32.lower = c->SG[i].Addr.lower;
- temp64.val32.upper = c->SG[i].Addr.upper;
- pci_unmap_single(h->pdev,
- (dma_addr_t) temp64.val, buff_size[i],
- PCI_DMA_BIDIRECTIONAL);
- }
- check_ioctl_unit_attention(h, c);
- /* Copy the error information out */
- ioc->error_info = *(c->err_info);
- if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- cmd_special_free(h, c);
- status = -EFAULT;
- goto cleanup1;
- }
- if (ioc->Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
- BYTE __user *ptr = ioc->buf;
- for (i = 0; i < sg_used; i++) {
- if (copy_to_user(ptr, buff[i], buff_size[i])) {
- cmd_special_free(h, c);
- status = -EFAULT;
- goto cleanup1;
- }
- ptr += buff_size[i];
- }
- }
- cmd_special_free(h, c);
- status = 0;
-cleanup1:
- if (buff) {
- for (i = 0; i < sg_used; i++)
- kfree(buff[i]);
- kfree(buff);
- }
- kfree(buff_size);
- kfree(ioc);
- return status;
-}
-
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct gendisk *disk = bdev->bd_disk;
- ctlr_info_t *h = get_host(disk);
- void __user *argp = (void __user *)arg;
-
- dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
- cmd, arg);
- switch (cmd) {
- case CCISS_GETPCIINFO:
- return cciss_getpciinfo(h, argp);
- case CCISS_GETINTINFO:
- return cciss_getintinfo(h, argp);
- case CCISS_SETINTINFO:
- return cciss_setintinfo(h, argp);
- case CCISS_GETNODENAME:
- return cciss_getnodename(h, argp);
- case CCISS_SETNODENAME:
- return cciss_setnodename(h, argp);
- case CCISS_GETHEARTBEAT:
- return cciss_getheartbeat(h, argp);
- case CCISS_GETBUSTYPES:
- return cciss_getbustypes(h, argp);
- case CCISS_GETFIRMVER:
- return cciss_getfirmver(h, argp);
- case CCISS_GETDRIVVER:
- return cciss_getdrivver(h, argp);
- case CCISS_DEREGDISK:
- case CCISS_REGNEWD:
- case CCISS_REVALIDVOLS:
- return rebuild_lun_table(h, 0, 1);
- case CCISS_GETLUNINFO:
- return cciss_getluninfo(h, disk, argp);
- case CCISS_PASSTHRU:
- return cciss_passthru(h, argp);
- case CCISS_BIG_PASSTHRU:
- return cciss_bigpassthru(h, argp);
-
- /* scsi_cmd_blk_ioctl handles these, below, though some are not */
- /* very meaningful for cciss. SG_IO is the main one people want. */
-
- case SG_GET_VERSION_NUM:
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_RESERVED_SIZE:
- case SG_SET_RESERVED_SIZE:
- case SG_EMULATED_HOST:
- case SG_IO:
- case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- /* scsi_cmd_blk_ioctl would normally handle these, below, but */
- /* they aren't a good fit for cciss, as CD-ROMs are */
- /* not supported, and we don't have any bus/target/lun */
- /* which we present to the kernel. */
-
- case CDROM_SEND_PACKET:
- case CDROMCLOSETRAY:
- case CDROMEJECT:
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- default:
- return -ENOTTY;
- }
-}
-
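-/* Restart the logical drives' request queues round-robin, beginning
- * with the next queue after the last one serviced, stopping early if
- * the command pool is exhausted again.
- */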
-static void cciss_check_queues(ctlr_info_t *h)
-{
- int start_queue = h->next_to_run;
- int i;
-
- /* check to see if we have maxed out the number of commands that can
- * be placed on the queue. If so then exit. We do this check here
- * in case the interrupt we serviced was from an ioctl and did not
- * free any new commands.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
- return;
-
- /* We have room on the queue for more commands. Now we need to queue
- * them up. We will also keep track of the next queue to run so
- * that every queue gets a chance to be started first.
- */
- for (i = 0; i < h->highest_lun + 1; i++) {
- int curr_queue = (start_queue + i) % (h->highest_lun + 1);
- /* make sure the disk has been added and the drive is real
- * because this can be called from the middle of init_one.
- */
- if (!h->drv[curr_queue])
- continue;
- if (!(h->drv[curr_queue]->queue) ||
- !(h->drv[curr_queue]->heads))
- continue;
- blk_start_queue(h->gendisk[curr_queue]->queue);
-
- /* check to see if we have maxed out the number of commands
- * that can be placed on the queue.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
- if (curr_queue == start_queue) {
- h->next_to_run =
- (start_queue + 1) % (h->highest_lun + 1);
- break;
- } else {
- h->next_to_run = curr_queue;
- break;
- }
- }
- }
-}
-
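-/* Completion bottom half: unmap the command's s/g DMA (following any
- * chain block), set the residual count for passthrough requests, end
- * the request, then free the command and restart queues under the lock.
- */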
-static void cciss_softirq_done(struct request *rq)
-{
- CommandList_struct *c = rq->completion_data;
- ctlr_info_t *h = hba[c->ctlr];
- SGDescriptor_struct *curr_sg = c->SG;
- u64bit temp64;
- unsigned long flags;
- int i, ddir;
- int sg_index = 0;
-
- if (c->Request.Type.Direction == XFER_READ)
- ddir = PCI_DMA_FROMDEVICE;
- else
- ddir = PCI_DMA_TODEVICE;
-
- /* command did not need to be retried */
- /* unmap the DMA mapping for all the scatter gather elements */
- for (i = 0; i < c->Header.SGList; i++) {
- if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
- cciss_unmap_sg_chain_block(h, c);
- /* Point to the next block */
- curr_sg = h->cmd_sg_list[c->cmdindex];
- sg_index = 0;
- }
- temp64.val32.lower = curr_sg[sg_index].Addr.lower;
- temp64.val32.upper = curr_sg[sg_index].Addr.upper;
- pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
- ddir);
- ++sg_index;
- }
-
- dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
-
- /* set the residual count for pc requests */
- if (blk_rq_is_passthrough(rq))
- scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
- blk_end_request_all(rq, scsi_req(rq)->result ?
- BLK_STS_IOERR : BLK_STS_OK);
-
- spin_lock_irqsave(&h->lock, flags);
- cmd_free(h, c);
- cciss_check_queues(h);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
- unsigned char scsi3addr[], uint32_t log_unit)
-{
- memcpy(scsi3addr, h->drv[log_unit]->LunID,
- sizeof(h->drv[log_unit]->LunID));
-}
-
-/* This function gets the SCSI vendor, model, and revision of a logical drive
- * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
- * they cannot be read.
- */
-static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
- char *vendor, char *model, char *rev)
-{
- int rc;
- InquiryData_struct *inq_buf;
- unsigned char scsi3addr[8];
-
- *vendor = '\0';
- *model = '\0';
- *rev = '\0';
-
- inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (!inq_buf)
- return;
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
- scsi3addr, TYPE_CMD);
- if (rc == IO_OK) {
- memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
- vendor[VENDOR_LEN] = '\0';
- memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
- model[MODEL_LEN] = '\0';
- memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
- rev[REV_LEN] = '\0';
- }
-
- kfree(inq_buf);
- return;
-}
-
-/* This function gets the serial number of a logical drive via
- * inquiry page 0x83. Serial no. is 16 bytes. If the serial
- * number cannot be had, for whatever reason, 16 bytes of 0xff
- * are returned instead.
- */
-static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
- unsigned char *serial_no, int buflen)
-{
-#define PAGE_83_INQ_BYTES 64
- int rc;
- unsigned char *buf;
- unsigned char scsi3addr[8];
-
- if (buflen > 16)
- buflen = 16;
- memset(serial_no, 0xff, buflen);
- buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
- if (!buf)
- return;
- memset(serial_no, 0, buflen);
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
- PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
- if (rc == IO_OK)
- memcpy(serial_no, &buf[8], buflen);
- kfree(buf);
- return;
-}
-
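-/* initialize_rq_fn hook: set up the scsi_request PDU embedded in each
- * request allocated on this queue.
- */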
-static void cciss_initialize_rq(struct request *rq)
-{
- struct scsi_request *sreq = blk_mq_rq_to_pdu(rq);
-
- scsi_req_init(sreq);
-}
-
-/*
- * cciss_add_disk sets up the block device queue for a logical drive
- */
-static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
- int drv_index)
-{
- disk->queue = blk_alloc_queue(GFP_KERNEL);
- if (!disk->queue)
- goto init_queue_failure;
-
- disk->queue->cmd_size = sizeof(struct scsi_request);
- disk->queue->request_fn = do_cciss_request;
- disk->queue->initialize_rq_fn = cciss_initialize_rq;
- disk->queue->queue_lock = &h->lock;
- queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
- if (blk_init_allocated_queue(disk->queue) < 0)
- goto cleanup_queue;
-
- sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
- disk->major = h->major;
- disk->first_minor = drv_index << NWD_SHIFT;
- disk->fops = &cciss_fops;
- if (cciss_create_ld_sysfs_entry(h, drv_index))
- goto cleanup_queue;
- disk->private_data = h->drv[drv_index];
-
- /* Set up queue information */
- blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
-
- /* This is a hardware imposed limit. */
- blk_queue_max_segments(disk->queue, h->maxsgentries);
-
- blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
-
- blk_queue_softirq_done(disk->queue, cciss_softirq_done);
-
- disk->queue->queuedata = h;
-
- blk_queue_logical_block_size(disk->queue,
- h->drv[drv_index]->block_size);
-
- /* Make sure all queue data is written out before setting
- * h->drv[drv_index]->queue, as setting this allows the
- * interrupt handler to start the queue.
- */
- wmb();
- h->drv[drv_index]->queue = disk->queue;
- device_add_disk(&h->drv[drv_index]->dev, disk);
- return 0;
-
-cleanup_queue:
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
-init_queue_failure:
- return -1;
-}
-
-/* This function will check the usage_count of the drive to be updated/added.
- * If the usage_count is zero and it is a heretofore unknown drive, or
- * the drive's capacity, geometry, or serial number has changed,
- * then the drive information will be updated and the disk will be
- * re-registered with the kernel. If these conditions don't hold,
- * then it will be left alone for the next reboot. The exception to this
- * is disk 0 which will always be left registered with the kernel since it
- * is also the controller node. Any changes to disk 0 will show up on
- * the next reboot.
- */
-static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
- int first_time, int via_ioctl)
-{
- struct gendisk *disk;
- InquiryData_struct *inq_buff = NULL;
- unsigned int block_size;
- sector_t total_size;
- unsigned long flags = 0;
- int ret = 0;
- drive_info_struct *drvinfo;
-
- /* Get information about the disk and modify the driver structure */
- inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
- if (inq_buff == NULL || drvinfo == NULL)
- goto mem_msg;
-
- /* testing to see if 16-byte CDBs are already being used */
- if (h->cciss_read == CCISS_READ_16) {
- cciss_read_capacity_16(h, drv_index,
- &total_size, &block_size);
-
- } else {
- cciss_read_capacity(h, drv_index, &total_size, &block_size);
- /* if read_capacity returns all F's, this volume is >2TB
- * in size, so we switch to 16-byte CDBs for all
- * read/write ops.
- */
- if (total_size == 0xFFFFFFFFULL) {
- cciss_read_capacity_16(h, drv_index,
- &total_size, &block_size);
- h->cciss_read = CCISS_READ_16;
- h->cciss_write = CCISS_WRITE_16;
- } else {
- h->cciss_read = CCISS_READ_10;
- h->cciss_write = CCISS_WRITE_10;
- }
- }
-
- cciss_geometry_inquiry(h, drv_index, total_size, block_size,
- inq_buff, drvinfo);
- drvinfo->block_size = block_size;
- drvinfo->nr_blocks = total_size + 1;
-
- cciss_get_device_descr(h, drv_index, drvinfo->vendor,
- drvinfo->model, drvinfo->rev);
- cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
- sizeof(drvinfo->serial_no));
- /* Save the lunid in case we deregister the disk, below. */
- memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
- sizeof(drvinfo->LunID));
-
- /* Is it the same disk we already know, and nothing's changed? */
- if (h->drv[drv_index]->raid_level != -1 &&
- ((memcmp(drvinfo->serial_no,
- h->drv[drv_index]->serial_no, 16) == 0) &&
- drvinfo->block_size == h->drv[drv_index]->block_size &&
- drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
- drvinfo->heads == h->drv[drv_index]->heads &&
- drvinfo->sectors == h->drv[drv_index]->sectors &&
- drvinfo->cylinders == h->drv[drv_index]->cylinders))
- /* The disk is unchanged, nothing to update */
- goto freeret;
-
- /* If we get here it's not the same disk, or something's changed,
- * so we need to deregister it, and re-register it, if it's not
- * in use.
- * If the disk already exists then deregister it before proceeding
- * (unless it's the first disk, for the controller node).
- */
- if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
- dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
- spin_lock_irqsave(&h->lock, flags);
- h->drv[drv_index]->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- /* deregister_disk sets h->drv[drv_index]->queue = NULL
- * which keeps the interrupt handler from starting
- * the queue.
- */
- ret = deregister_disk(h, drv_index, 0, via_ioctl);
- }
-
- /* If the disk is in use return */
- if (ret)
- goto freeret;
-
- /* Save the new information from cciss_geometry_inquiry
- * and serial number inquiry. If the disk was deregistered
- * above, then h->drv[drv_index] will be NULL.
- */
- if (h->drv[drv_index] == NULL) {
- drvinfo->device_initialized = 0;
- h->drv[drv_index] = drvinfo;
- drvinfo = NULL; /* so it won't be freed below. */
- } else {
- /* special case for cxd0 */
- h->drv[drv_index]->block_size = drvinfo->block_size;
- h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
- h->drv[drv_index]->heads = drvinfo->heads;
- h->drv[drv_index]->sectors = drvinfo->sectors;
- h->drv[drv_index]->cylinders = drvinfo->cylinders;
- h->drv[drv_index]->raid_level = drvinfo->raid_level;
- memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
- memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
- VENDOR_LEN + 1);
- memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
- memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
- }
-
- ++h->num_luns;
- disk = h->gendisk[drv_index];
- set_capacity(disk, h->drv[drv_index]->nr_blocks);
-
- /* If it's not disk 0 (drv_index != 0)
- * or if it was disk 0, but there was previously
- * no actual corresponding configured logical drive
- * (raid_level == -1) then we want to update the
- * logical drive's information.
- */
- if (drv_index || first_time) {
- if (cciss_add_disk(h, disk, drv_index) != 0) {
- cciss_free_gendisk(h, drv_index);
- cciss_free_drive_info(h, drv_index);
- dev_warn(&h->pdev->dev, "could not update disk %d\n",
- drv_index);
- --h->num_luns;
- }
- }
-
-freeret:
- kfree(inq_buff);
- kfree(drvinfo);
- return;
-mem_msg:
- dev_err(&h->pdev->dev, "out of memory\n");
- goto freeret;
-}
-
-/* This function will find the first index of the controller's drive array
- * that has a null drv pointer, allocate the drive info struct, and
- * return that index. This is where new drives will be added.
- * If the index to be returned is greater than the highest_lun index for
- * the controller then highest_lun is set to this new index.
- * If there are no available indexes or if the allocation fails, then -1
- * is returned. "controller_node" is used to know if this is a real
- * logical drive, or just the controller node, which determines if this
- * counts towards highest_lun.
- */
-static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
-{
- int i;
- drive_info_struct *drv;
-
- /* Search for an empty slot for our drive info */
- for (i = 0; i < CISS_MAX_LUN; i++) {
-
- /* if not cxd0 case, and it's occupied, skip it. */
- if (h->drv[i] && i != 0)
- continue;
- /*
- * If it's cxd0 case, and drv is alloc'ed already, and a
- * disk is configured there, skip it.
- */
- if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
- continue;
-
- /*
- * We've found an empty slot. Update highest_lun
- * provided this isn't just the fake cxd0 controller node.
- */
- if (i > h->highest_lun && !controller_node)
- h->highest_lun = i;
-
- /* If adding a real disk at cxd0, and it's already alloc'ed */
- if (i == 0 && h->drv[i] != NULL)
- return i;
-
- /*
- * Found an empty slot, not already alloc'ed. Allocate it.
- * Mark it with raid_level == -1, so we know it's new later on.
- */
- drv = kzalloc(sizeof(*drv), GFP_KERNEL);
- if (!drv)
- return -1;
- drv->raid_level = -1; /* so we know it's new */
- h->drv[i] = drv;
- return i;
- }
- return -1;
-}
-
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
-{
- kfree(h->drv[drv_index]);
- h->drv[drv_index] = NULL;
-}
-
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
-{
- put_disk(h->gendisk[drv_index]);
- h->gendisk[drv_index] = NULL;
-}
-
-/* cciss_add_gendisk finds a free hba[]->drv structure
- * and allocates a gendisk if needed, and sets the lunid
- * in the drvinfo structure. It returns the index into
- * the ->drv[] array, or -1 if none are free.
- * is_controller_node indicates whether highest_lun should
- * count this disk, or if it's only being added to provide
- * a means to talk to the controller in case no logical
- * drives have yet been configured.
- */
-static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
- int controller_node)
-{
- int drv_index;
-
- drv_index = cciss_alloc_drive_info(h, controller_node);
- if (drv_index == -1)
- return -1;
-
- /* Check if the gendisk needs to be allocated */
- if (!h->gendisk[drv_index]) {
- h->gendisk[drv_index] =
- alloc_disk(1 << NWD_SHIFT);
- if (!h->gendisk[drv_index]) {
- dev_err(&h->pdev->dev,
- "could not allocate a new disk %d\n",
- drv_index);
- goto err_free_drive_info;
- }
- }
- memcpy(h->drv[drv_index]->LunID, lunid,
- sizeof(h->drv[drv_index]->LunID));
- if (cciss_create_ld_sysfs_entry(h, drv_index))
- goto err_free_disk;
- /* Don't need to mark this busy because nobody else knows
- * about this disk yet to contend for access to it.
- */
- h->drv[drv_index]->busy_configuring = 0;
- wmb();
- return drv_index;
-
-err_free_disk:
- cciss_free_gendisk(h, drv_index);
-err_free_drive_info:
- cciss_free_drive_info(h, drv_index);
- return -1;
-}
-
-/* This is for the special case of a controller which
- * has no logical drives. In this case, we still need
- * to register a disk so the controller can be accessed
- * by the Array Config Utility.
- */
-static void cciss_add_controller_node(ctlr_info_t *h)
-{
- struct gendisk *disk;
- int drv_index;
-
- if (h->gendisk[0] != NULL) /* already did this? Then bail. */
- return;
-
- drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
- if (drv_index == -1)
- goto error;
- h->drv[drv_index]->block_size = 512;
- h->drv[drv_index]->nr_blocks = 0;
- h->drv[drv_index]->heads = 0;
- h->drv[drv_index]->sectors = 0;
- h->drv[drv_index]->cylinders = 0;
- h->drv[drv_index]->raid_level = -1;
- memset(h->drv[drv_index]->serial_no, 0, 16);
- disk = h->gendisk[drv_index];
- if (cciss_add_disk(h, disk, drv_index) == 0)
- return;
- cciss_free_gendisk(h, drv_index);
- cciss_free_drive_info(h, drv_index);
-error:
- dev_warn(&h->pdev->dev, "could not add disk 0.\n");
- return;
-}
-
-/* This function will add and remove logical drives from the Logical
- * drive array of the controller and maintain persistence of ordering
- * so that mount points are preserved until the next reboot. This allows
- * for the removal of logical drives in the middle of the drive array
- * without a re-ordering of those drives.
- * INPUT
- * h = The controller to perform the operations on
- */
-static int rebuild_lun_table(ctlr_info_t *h, int first_time,
- int via_ioctl)
-{
- int num_luns;
- ReportLunData_struct *ld_buff = NULL;
- int return_code;
- int listlength = 0;
- int i;
- int drv_found;
- int drv_index = 0;
- unsigned char lunid[8] = CTLR_LUNID;
- unsigned long flags;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /* Set busy_configuring flag for this operation */
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- h->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
- if (ld_buff == NULL)
- goto mem_msg;
-
- return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
- sizeof(ReportLunData_struct),
- 0, CTLR_LUNID, TYPE_CMD);
-
- if (return_code == IO_OK)
- listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
- else { /* reading number of logical volumes failed */
- dev_warn(&h->pdev->dev,
- "report logical volume command failed\n");
- listlength = 0;
- goto freeret;
- }
-
- num_luns = listlength / 8; /* 8 bytes per entry */
- if (num_luns > CISS_MAX_LUN) {
- num_luns = CISS_MAX_LUN;
- dev_warn(&h->pdev->dev, "more luns configured"
- " on controller than can be handled by"
- " this driver.\n");
- }
-
- if (num_luns == 0)
- cciss_add_controller_node(h);
-
- /* Compare controller drive array to driver's drive array
- * to see if any drives are missing on the controller due
- * to action of Array Config Utility (user deletes drive)
- * and deregister logical drives which have disappeared.
- */
- for (i = 0; i <= h->highest_lun; i++) {
- int j;
- drv_found = 0;
-
- /* skip holes in the array from already deleted drives */
- if (h->drv[i] == NULL)
- continue;
-
- for (j = 0; j < num_luns; j++) {
- memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
- if (memcmp(h->drv[i]->LunID, lunid,
- sizeof(lunid)) == 0) {
- drv_found = 1;
- break;
- }
- }
- if (!drv_found) {
- /* Deregister it from the OS, it's gone. */
- spin_lock_irqsave(&h->lock, flags);
- h->drv[i]->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
- return_code = deregister_disk(h, i, 1, via_ioctl);
- if (h->drv[i] != NULL)
- h->drv[i]->busy_configuring = 0;
- }
- }
-
- /* Compare controller drive array to driver's drive array.
- * Check for updates in the drive information and any new drives
- * on the controller due to ACU adding logical drives, or changing
- * a logical drive's size, etc. Reregister any new/changed drives
- */
- for (i = 0; i < num_luns; i++) {
- int j;
-
- drv_found = 0;
-
- memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
-		/* Check whether the LUN is already in the driver's
-		 * drive array. If so, update its info (when not in
-		 * use). If not, find the first free index and add it.
- */
- for (j = 0; j <= h->highest_lun; j++) {
- if (h->drv[j] != NULL &&
- memcmp(h->drv[j]->LunID, lunid,
- sizeof(h->drv[j]->LunID)) == 0) {
- drv_index = j;
- drv_found = 1;
- break;
- }
- }
-
- /* check if the drive was found already in the array */
- if (!drv_found) {
- drv_index = cciss_add_gendisk(h, lunid, 0);
- if (drv_index == -1)
- goto freeret;
- }
- cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
- } /* end for */
-
-freeret:
- kfree(ld_buff);
- h->busy_configuring = 0;
- /* We return -1 here to tell the ACU that we have registered/updated
- * all of the drives that we can and to keep it from calling us
- * additional times.
- */
- return -1;
-mem_msg:
- dev_err(&h->pdev->dev, "out of memory\n");
- h->busy_configuring = 0;
- goto freeret;
-}
-
-static void cciss_clear_drive_info(drive_info_struct *drive_info)
-{
- /* zero out the disk size info */
- drive_info->nr_blocks = 0;
- drive_info->block_size = 0;
- drive_info->heads = 0;
- drive_info->sectors = 0;
- drive_info->cylinders = 0;
- drive_info->raid_level = -1;
- memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
- memset(drive_info->model, 0, sizeof(drive_info->model));
- memset(drive_info->rev, 0, sizeof(drive_info->rev));
- memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
- /*
- * don't clear the LUNID though, we need to remember which
- * one this one is.
- */
-}
-
-/* This function will deregister the disk and its queue from the
- * kernel. It must be called with the controller lock held and the
- * drv structure's busy_configuring flag set. Its parameters are:
- *
- * disk = This is the disk to be deregistered
- * drv = This is the drive_info_struct associated with the disk to be
- * deregistered. It contains information about the disk used
- * by the driver.
- * clear_all = This flag determines whether or not the disk information
- * is going to be completely cleared out and the highest_lun
- * reset. Sometimes we want to clear out information about
- * the disk in preparation for re-adding it. In this case
- * the highest_lun should be left unchanged and the LunID
- * should not be cleared.
- * via_ioctl
- * This indicates whether we've reached this path via ioctl.
- * This affects the maximum usage count allowed for c0d0 to be messed with.
- * If this path is reached via ioctl(), then the max_usage_count will
- * be 1, as the process calling ioctl() has got to have the device open.
- * If we get here via sysfs, then the max usage count will be zero.
- */
-static int deregister_disk(ctlr_info_t *h, int drv_index,
- int clear_all, int via_ioctl)
-{
- int i;
- struct gendisk *disk;
- drive_info_struct *drv;
- int recalculate_highest_lun;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- drv = h->drv[drv_index];
- disk = h->gendisk[drv_index];
-
-	/* make sure logical volume is NOT in use */
- if (clear_all || (h->gendisk[0] == disk)) {
- if (drv->usage_count > via_ioctl)
- return -EBUSY;
- } else if (drv->usage_count > 0)
- return -EBUSY;
-
- recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
-
-	/* invalidate the devices and deregister the disk. If it is disk
-	 * zero, do not deregister it; just zero out its values. This
- * allows us to delete disk zero but keep the controller registered.
- */
- if (h->gendisk[0] != disk) {
- struct request_queue *q = disk->queue;
- if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
- del_gendisk(disk);
- }
- if (q)
- blk_cleanup_queue(q);
- /* If clear_all is set then we are deleting the logical
- * drive, not just refreshing its info. For drives
- * other than disk 0 we will call put_disk. We do not
- * do this for disk 0 as we need it to be able to
- * configure the controller.
- */
- if (clear_all){
- /* This isn't pretty, but we need to find the
-			 * disk in our array and NULL out the pointer.
- * This is so that we will call alloc_disk if
- * this index is used again later.
- */
- for (i=0; i < CISS_MAX_LUN; i++){
- if (h->gendisk[i] == disk) {
- h->gendisk[i] = NULL;
- break;
- }
- }
- put_disk(disk);
- }
- } else {
- set_capacity(disk, 0);
- cciss_clear_drive_info(drv);
- }
-
- --h->num_luns;
-
-	/* if it was the last disk, find the new highest lun */
- if (clear_all && recalculate_highest_lun) {
- int newhighest = -1;
- for (i = 0; i <= h->highest_lun; i++) {
- /* if the disk has size > 0, it is available */
- if (h->drv[i] && h->drv[i]->heads)
- newhighest = i;
- }
- h->highest_lun = newhighest;
- }
- return 0;
-}
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
- size_t size, __u8 page_code, unsigned char *scsi3addr,
- int cmd_type)
-{
- u64bit buff_dma_handle;
- int status = IO_OK;
-
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
- if (buff != NULL) {
- c->Header.SGList = 1;
- c->Header.SGTotal = 1;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
- c->Header.Tag.lower = c->busaddr;
- memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
-
- c->Request.Type.Type = cmd_type;
- if (cmd_type == TYPE_CMD) {
- switch (cmd) {
- case CISS_INQUIRY:
- /* are we trying to read a vital product page */
- if (page_code != 0) {
- c->Request.CDB[1] = 0x01;
- c->Request.CDB[2] = page_code;
- }
- c->Request.CDBLen = 6;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = CISS_INQUIRY;
- c->Request.CDB[4] = size & 0xFF;
- break;
- case CISS_REPORT_LOG:
- case CISS_REPORT_PHYS:
-		/* Talking to the controller, so it's a physical command:
-		   mode = 00, target = 0. Nothing to write.
- */
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
- c->Request.CDB[7] = (size >> 16) & 0xFF;
- c->Request.CDB[8] = (size >> 8) & 0xFF;
- c->Request.CDB[9] = size & 0xFF;
- break;
-
- case CCISS_READ_CAPACITY:
- c->Request.CDBLen = 10;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
- case CCISS_READ_CAPACITY_16:
- c->Request.CDBLen = 16;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- c->Request.CDB[1] = 0x10;
- c->Request.CDB[10] = (size >> 24) & 0xFF;
- c->Request.CDB[11] = (size >> 16) & 0xFF;
- c->Request.CDB[12] = (size >> 8) & 0xFF;
- c->Request.CDB[13] = size & 0xFF;
- break;
- case CCISS_CACHE_FLUSH:
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = BMIC_WRITE;
- c->Request.CDB[6] = BMIC_CACHE_FLUSH;
- c->Request.CDB[7] = (size >> 8) & 0xFF;
- c->Request.CDB[8] = size & 0xFF;
- break;
- case TEST_UNIT_READY:
- c->Request.CDBLen = 6;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_NONE;
- c->Request.Timeout = 0;
- break;
- default:
-			dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd);
- return IO_ERROR;
- }
- } else if (cmd_type == TYPE_MSG) {
- switch (cmd) {
- case CCISS_ABORT_MSG:
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd; /* abort */
- c->Request.CDB[1] = 0; /* abort a command */
- /* buff contains the tag of the command to abort */
- memcpy(&c->Request.CDB[4], buff, 8);
- break;
- case CCISS_RESET_MSG:
- c->Request.CDBLen = 16;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_NONE;
- c->Request.Timeout = 0;
- memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
- c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
- break;
- case CCISS_NOOP_MSG:
- c->Request.CDBLen = 1;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
- default:
- dev_warn(&h->pdev->dev,
- "unknown message type %d\n", cmd);
- return IO_ERROR;
- }
- } else {
- dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
- return IO_ERROR;
- }
- /* Fill in the scatter gather information */
- if (size > 0) {
- buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
- buff, size,
- PCI_DMA_BIDIRECTIONAL);
- c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
- c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
- c->SG[0].Len = size;
- c->SG[0].Ext = 0; /* we are not chaining */
- }
- return status;
-}
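-
-/* Usage sketch (illustrative; an addition, not from the original driver):
- * fill_cmd() is driven by wrappers such as sendcmd_withirq() below. For
- * example, cciss_geometry_inquiry() issues a 6-byte INQUIRY for vital
- * product page 0xC1 through it:
- *
- *	fill_cmd(h, c, CISS_INQUIRY, inq_buff, sizeof(*inq_buff),
- *		 0xC1, scsi3addr, TYPE_CMD);
- */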
-
-static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
- u8 reset_type)
-{
- CommandList_struct *c;
- int return_status;
-
- c = cmd_alloc(h);
- if (!c)
- return -ENOMEM;
- return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
- CTLR_LUNID, TYPE_MSG);
- c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
- if (return_status != IO_OK) {
- cmd_special_free(h, c);
- return return_status;
- }
- c->waiting = NULL;
- enqueue_cmd_and_start_io(h, c);
- /* Don't wait for completion, the reset won't complete. Don't free
- * the command either. This is the last command we will send before
- * re-initializing everything, so it doesn't matter and won't leak.
- */
- return 0;
-}
-
-static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
-{
- switch (c->err_info->ScsiStatus) {
- case SAM_STAT_GOOD:
- return IO_OK;
- case SAM_STAT_CHECK_CONDITION:
- switch (0xf & c->err_info->SenseInfo[2]) {
- case 0: return IO_OK; /* no sense */
- case 1: return IO_OK; /* recovered error */
- default:
- if (check_for_unit_attention(h, c))
- return IO_NEEDS_RETRY;
- dev_warn(&h->pdev->dev, "cmd 0x%02x "
- "check condition, sense key = 0x%02x\n",
- c->Request.CDB[0], c->err_info->SenseInfo[2]);
- }
- break;
- default:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x "
-			"scsi status = 0x%02x\n",
- c->Request.CDB[0], c->err_info->ScsiStatus);
- break;
- }
- return IO_ERROR;
-}
-
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
-{
- int return_status = IO_OK;
-
- if (c->err_info->CommandStatus == CMD_SUCCESS)
- return IO_OK;
-
- switch (c->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- return_status = check_target_status(h, c);
- break;
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquiry and report lun commands */
- break;
- case CMD_INVALID:
- dev_warn(&h->pdev->dev, "cmd 0x%02x is "
- "reported invalid\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "cmd 0x%02x has "
- "protocol error\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_HARDWARE_ERR:
- dev_warn(&h->pdev->dev, "cmd 0x%02x had "
-			"hardware error\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "cmd 0x%02x had "
- "connection lost\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "cmd 0x%02x was "
- "aborted\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
- "abort failed\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
- c->Request.CDB[0]);
- return_status = IO_NEEDS_RETRY;
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev, "cmd unabortable\n");
- return_status = IO_ERROR;
- break;
- default:
- dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
- "unknown status %x\n", c->Request.CDB[0],
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
- }
- return return_status;
-}
-
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
- int attempt_retry)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- u64bit buff_dma_handle;
- int return_status = IO_OK;
-
-resend_cmd2:
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
-
- wait_for_completion(&wait);
-
- if (c->err_info->CommandStatus == 0 || !attempt_retry)
- goto command_done;
-
- return_status = process_sendcmd_error(h, c);
-
- if (return_status == IO_NEEDS_RETRY &&
- c->retry_count < MAX_CMD_RETRIES) {
- dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
- c->Request.CDB[0]);
- c->retry_count++;
- /* erase the old error information */
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- return_status = IO_OK;
- reinit_completion(&wait);
- goto resend_cmd2;
- }
-
-command_done:
- /* unlock the buffers from DMA */
- buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
- buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
- c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
- return return_status;
-}
-
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
- __u8 page_code, unsigned char scsi3addr[],
- int cmd_type)
-{
- CommandList_struct *c;
- int return_status;
-
- c = cmd_special_alloc(h);
- if (!c)
- return -ENOMEM;
- return_status = fill_cmd(h, c, cmd, buff, size, page_code,
- scsi3addr, cmd_type);
- if (return_status == IO_OK)
- return_status = sendcmd_withirq_core(h, c, 1);
-
- cmd_special_free(h, c);
- return return_status;
-}
-
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
- sector_t total_size,
- unsigned int block_size,
- InquiryData_struct *inq_buff,
- drive_info_struct *drv)
-{
- int return_code;
- unsigned long t;
- unsigned char scsi3addr[8];
-
- memset(inq_buff, 0, sizeof(InquiryData_struct));
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
- sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- if (inq_buff->data_byte[8] == 0xFF) {
- dev_warn(&h->pdev->dev,
- "reading geometry failed, volume "
- "does not support reading geometry\n");
- drv->heads = 255;
- drv->sectors = 32; /* Sectors per track */
- drv->cylinders = total_size + 1;
- drv->raid_level = RAID_UNKNOWN;
- } else {
- drv->heads = inq_buff->data_byte[6];
- drv->sectors = inq_buff->data_byte[7];
- drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
- drv->cylinders += inq_buff->data_byte[5];
- drv->raid_level = inq_buff->data_byte[8];
- }
- drv->block_size = block_size;
- drv->nr_blocks = total_size + 1;
- t = drv->heads * drv->sectors;
- if (t > 1) {
- sector_t real_size = total_size + 1;
- unsigned long rem = sector_div(real_size, t);
- if (rem)
- real_size++;
- drv->cylinders = real_size;
- }
- } else { /* Get geometry failed */
- dev_warn(&h->pdev->dev, "reading geometry failed\n");
- }
-}
-
-static void
-cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
- unsigned int *block_size)
-{
- ReadCapdata_struct *buf;
- int return_code;
- unsigned char scsi3addr[8];
-
- buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
- if (!buf) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return;
- }
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
- sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
- *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
- } else { /* read capacity command failed */
- dev_warn(&h->pdev->dev, "read capacity failed\n");
- *total_size = 0;
- *block_size = BLOCK_SIZE;
- }
- kfree(buf);
-}
-
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size)
-{
- ReadCapdata_struct_16 *buf;
- int return_code;
- unsigned char scsi3addr[8];
-
- buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
- if (!buf) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return;
- }
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
- buf, sizeof(ReadCapdata_struct_16),
- 0, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
- *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
- } else { /* read capacity command failed */
- dev_warn(&h->pdev->dev, "read capacity failed\n");
- *total_size = 0;
- *block_size = BLOCK_SIZE;
- }
- dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size+1, *block_size);
- kfree(buf);
-}
-
-static int cciss_revalidate(struct gendisk *disk)
-{
- ctlr_info_t *h = get_host(disk);
- drive_info_struct *drv = get_drv(disk);
- int logvol;
- int FOUND = 0;
- unsigned int block_size;
- sector_t total_size;
- InquiryData_struct *inq_buff = NULL;
-
- for (logvol = 0; logvol <= h->highest_lun; logvol++) {
- if (!h->drv[logvol])
- continue;
- if (memcmp(h->drv[logvol]->LunID, drv->LunID,
- sizeof(drv->LunID)) == 0) {
- FOUND = 1;
- break;
- }
- }
-
- if (!FOUND)
- return 1;
-
- inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return 1;
- }
- if (h->cciss_read == CCISS_READ_10) {
- cciss_read_capacity(h, logvol,
- &total_size, &block_size);
- } else {
- cciss_read_capacity_16(h, logvol,
- &total_size, &block_size);
- }
- cciss_geometry_inquiry(h, logvol, total_size, block_size,
- inq_buff, drv);
-
- blk_queue_logical_block_size(drv->queue, drv->block_size);
- set_capacity(disk, drv->nr_blocks);
-
- kfree(inq_buff);
- return 0;
-}
-
-/*
- * Map (physical) PCI mem into (virtual) kernel space
- */
-static void __iomem *remap_pci_mem(ulong base, ulong size)
-{
- ulong page_base = ((ulong) base) & PAGE_MASK;
- ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap(page_base, page_offs + size);
-
- return page_remapped ? (page_remapped + page_offs) : NULL;
-}
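-
-/* Usage sketch (illustrative; an addition, not from the original driver):
- * map the controller's register window the way cciss_pci_init() does
- * below. The helper hides the page-alignment fix-up that ioremap()
- * requires:
- *
- *	h->vaddr = remap_pci_mem(h->paddr, 0x250);
- */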
-
-/*
- * Takes jobs off the request Q and sends them to the hardware, then
- * puts them on the completion Q to wait for completion.
- */
-static void start_io(ctlr_info_t *h)
-{
- CommandList_struct *c;
-
- while (!list_empty(&h->reqQ)) {
- c = list_entry(h->reqQ.next, CommandList_struct, list);
- /* can't do anything if fifo is full */
- if ((h->access.fifo_full(h))) {
- dev_warn(&h->pdev->dev, "fifo full\n");
- break;
- }
-
- /* Get the first entry from the Request Q */
- removeQ(c);
- h->Qdepth--;
-
-		/* Tell the controller to execute the command */
- h->access.submit_command(h, c);
-
- /* Put job onto the completed Q */
- addQ(&h->cmpQ, c);
- }
-}
-
-/* Assumes that h->lock is held. */
-/* Zeros out the error record and then resends the command back */
-/* to the controller */
-static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
-{
- /* erase the old error information */
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-
- /* add it to software queue and then send it to the controller */
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- start_io(h);
-}
-
-static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
- unsigned int msg_byte, unsigned int host_byte,
- unsigned int driver_byte)
-{
- /* inverse of macros in scsi.h */
- return (scsi_status_byte & 0xff) |
- ((msg_byte & 0xff) << 8) |
- ((host_byte & 0xff) << 16) |
- ((driver_byte & 0xff) << 24);
-}
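-
-/* Illustrative sketch (an addition, not from the original driver):
- * make_status_bytes() packs the host and driver bytes exactly as the
- * host_byte()/driver_byte() accessors in <scsi/scsi.h> unpack them.
- */
-static inline int example_status_bytes_roundtrip(void)
-{
-	unsigned int r = make_status_bytes(SAM_STAT_GOOD, 0,
-					   DID_OK, DRIVER_OK);
-
-	/* both accessors recover exactly what was packed above */
-	return host_byte(r) == DID_OK && driver_byte(r) == DRIVER_OK;
-}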
-
-static inline int evaluate_target_status(ctlr_info_t *h,
- CommandList_struct *cmd, int *retry_cmd)
-{
- unsigned char sense_key;
- unsigned char status_byte, msg_byte, host_byte, driver_byte;
- int error_value;
-
- *retry_cmd = 0;
- /* If we get in here, it means we got "target status", that is, scsi status */
- status_byte = cmd->err_info->ScsiStatus;
- driver_byte = DRIVER_OK;
- msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
-
- if (blk_rq_is_passthrough(cmd->rq))
- host_byte = DID_PASSTHROUGH;
- else
- host_byte = DID_OK;
-
- error_value = make_status_bytes(status_byte, msg_byte,
- host_byte, driver_byte);
-
- if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (!blk_rq_is_passthrough(cmd->rq))
- dev_warn(&h->pdev->dev, "cmd %p "
- "has SCSI Status 0x%x\n",
- cmd, cmd->err_info->ScsiStatus);
- return error_value;
- }
-
- /* check the sense key */
- sense_key = 0xf & cmd->err_info->SenseInfo[2];
- /* no status or recovered error */
- if (((sense_key == 0x0) || (sense_key == 0x1)) &&
- !blk_rq_is_passthrough(cmd->rq))
- error_value = 0;
-
- if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !blk_rq_is_passthrough(cmd->rq);
- return 0;
- }
-
- /* Not SG_IO or similar? */
- if (!blk_rq_is_passthrough(cmd->rq)) {
- if (error_value != 0)
- dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
- " sense key = 0x%x\n", cmd, sense_key);
- return error_value;
- }
-
- scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
- return error_value;
-}
-
-/* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job. Note that this function does not need
- * to hold the hba/queue lock.
- */
-static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
- int timeout)
-{
- int retry_cmd = 0;
- struct request *rq = cmd->rq;
- struct scsi_request *sreq = scsi_req(rq);
-
- sreq->result = 0;
-
- if (timeout)
- sreq->result = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
-
- if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
- goto after_error_processing;
-
- switch (cmd->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- sreq->result = evaluate_target_status(h, cmd, &retry_cmd);
- break;
- case CMD_DATA_UNDERRUN:
- if (!blk_rq_is_passthrough(cmd->rq)) {
- dev_warn(&h->pdev->dev, "cmd %p has"
- " completed with data underrun "
- "reported\n", cmd);
- }
- break;
- case CMD_DATA_OVERRUN:
- if (!blk_rq_is_passthrough(cmd->rq))
- dev_warn(&h->pdev->dev, "cciss: cmd %p has"
- " completed with data overrun "
- "reported\n", cmd);
- break;
- case CMD_INVALID:
- dev_warn(&h->pdev->dev, "cciss: cmd %p is "
- "reported invalid\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "cciss: cmd %p has "
- "protocol error\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_HARDWARE_ERR:
- dev_warn(&h->pdev->dev, "cciss: cmd %p had "
- " hardware error\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "cciss: cmd %p had "
- "connection lost\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "cciss: cmd %p was "
- "aborted\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ABORT);
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
- "abort failed\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
- "abort %p\n", h->ctlr, cmd);
- if (cmd->retry_count < MAX_CMD_RETRIES) {
- retry_cmd = 1;
- dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
- cmd->retry_count++;
- } else
- dev_warn(&h->pdev->dev,
- "%p retried too many times\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ABORT);
- break;
- case CMD_TIMEOUT:
-		dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- default:
- dev_warn(&h->pdev->dev, "cmd %p returned "
- "unknown status %x\n", cmd,
- cmd->err_info->CommandStatus);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- }
-
-after_error_processing:
-
- /* We need to return this command */
- if (retry_cmd) {
- resend_cciss_cmd(h, cmd);
- return;
- }
- cmd->rq->completion_data = cmd;
- blk_complete_request(cmd->rq);
-}
-
-static inline u32 cciss_tag_contains_index(u32 tag)
-{
-#define DIRECT_LOOKUP_BIT 0x10
- return tag & DIRECT_LOOKUP_BIT;
-}
-
-static inline u32 cciss_tag_to_index(u32 tag)
-{
-#define DIRECT_LOOKUP_SHIFT 5
- return tag >> DIRECT_LOOKUP_SHIFT;
-}
-
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
-{
-#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
-#define CCISS_SIMPLE_ERROR_BITS 0x03
- if (likely(h->transMethod & CFGTBL_Trans_Performant))
- return tag & ~CCISS_PERF_ERROR_BITS;
- return tag & ~CCISS_SIMPLE_ERROR_BITS;
-}
-
-static inline void cciss_mark_tag_indexed(u32 *tag)
-{
- *tag |= DIRECT_LOOKUP_BIT;
-}
-
-static inline void cciss_set_tag_index(u32 *tag, u32 index)
-{
- *tag |= (index << DIRECT_LOOKUP_SHIFT);
-}
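-
-/* Illustrative sketch (an addition, not from the original driver): a
- * command pool index round-trips through the tag helpers above; the low
- * DIRECT_LOOKUP_SHIFT bits stay free for DIRECT_LOOKUP_BIT and the
- * controller's error bits.
- */
-static inline u32 example_tag_roundtrip(u32 index)
-{
-	u32 tag = 0;
-
-	cciss_set_tag_index(&tag, index);	/* tag = index << 5 */
-	cciss_mark_tag_indexed(&tag);		/* tag |= 0x10 */
-	return cciss_tag_to_index(tag);		/* recovers index */
-}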
-
-/*
- * Get a request and submit it to the controller.
- */
-static void do_cciss_request(struct request_queue *q)
-{
- ctlr_info_t *h = q->queuedata;
- CommandList_struct *c;
- sector_t start_blk;
- int seg;
- struct request *creq;
- u64bit temp64;
- struct scatterlist *tmp_sg;
- SGDescriptor_struct *curr_sg;
- drive_info_struct *drv;
- int i, dir;
- int sg_index = 0;
- int chained = 0;
-
- queue:
- creq = blk_peek_request(q);
- if (!creq)
- goto startio;
-
- BUG_ON(creq->nr_phys_segments > h->maxsgentries);
-
- c = cmd_alloc(h);
- if (!c)
- goto full;
-
- blk_start_request(creq);
-
- tmp_sg = h->scatter_list[c->cmdindex];
- spin_unlock_irq(q->queue_lock);
-
- c->cmd_type = CMD_RWREQ;
- c->rq = creq;
-
- /* fill in the request */
- drv = creq->rq_disk->private_data;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- /* got command from pool, so use the command block index instead */
- /* for direct lookups. */
- /* The first 2 bits are reserved for controller error reporting. */
- cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
- cciss_mark_tag_indexed(&c->Header.Tag.lower);
- memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
- c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */
- c->Request.Type.Type = TYPE_CMD; /* It is a command. */
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction =
- (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
- c->Request.Timeout = 0; /* Don't time out */
- c->Request.CDB[0] =
- (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = blk_rq_pos(creq);
-	dev_dbg(&h->pdev->dev, "sector=%d nr_sectors=%d\n",
- (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
- sg_init_table(tmp_sg, h->maxsgentries);
- seg = blk_rq_map_sg(q, creq, tmp_sg);
-
- /* get the DMA records for the setup */
- if (c->Request.Type.Direction == XFER_READ)
- dir = PCI_DMA_FROMDEVICE;
- else
- dir = PCI_DMA_TODEVICE;
-
- curr_sg = c->SG;
- sg_index = 0;
- chained = 0;
-
- for (i = 0; i < seg; i++) {
- if (((sg_index+1) == (h->max_cmd_sgentries)) &&
- !chained && ((seg - i) > 1)) {
- /* Point to next chain block. */
- curr_sg = h->cmd_sg_list[c->cmdindex];
- sg_index = 0;
- chained = 1;
- }
- curr_sg[sg_index].Len = tmp_sg[i].length;
- temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
- tmp_sg[i].offset,
- tmp_sg[i].length, dir);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- dev_warn(&h->pdev->dev,
- "%s: error mapping page for DMA\n", __func__);
- scsi_req(creq)->result =
- make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
- DID_SOFT_ERROR);
- cmd_free(h, c);
- return;
- }
- curr_sg[sg_index].Addr.lower = temp64.val32.lower;
- curr_sg[sg_index].Addr.upper = temp64.val32.upper;
- curr_sg[sg_index].Ext = 0; /* we are not chaining */
- ++sg_index;
- }
- if (chained) {
- if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
- (seg - (h->max_cmd_sgentries - 1)) *
- sizeof(SGDescriptor_struct))) {
- scsi_req(creq)->result =
- make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
- DID_SOFT_ERROR);
- cmd_free(h, c);
- return;
- }
- }
-
- /* track how many SG entries we are using */
- if (seg > h->maxSG)
- h->maxSG = seg;
-
- dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
- "chained[%d]\n",
- blk_rq_sectors(creq), seg, chained);
-
- c->Header.SGTotal = seg + chained;
- if (seg <= h->max_cmd_sgentries)
- c->Header.SGList = c->Header.SGTotal;
- else
- c->Header.SGList = h->max_cmd_sgentries;
- set_performant_mode(h, c);
-
- switch (req_op(creq)) {
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- if(h->cciss_read == CCISS_READ_10) {
- c->Request.CDB[1] = 0;
- c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
- c->Request.CDB[3] = (start_blk >> 16) & 0xff;
- c->Request.CDB[4] = (start_blk >> 8) & 0xff;
- c->Request.CDB[5] = start_blk & 0xff;
- c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
- c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
- c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
- c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
- } else {
- u32 upper32 = upper_32_bits(start_blk);
-
- c->Request.CDBLen = 16;
- c->Request.CDB[1]= 0;
- c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
- c->Request.CDB[3]= (upper32 >> 16) & 0xff;
- c->Request.CDB[4]= (upper32 >> 8) & 0xff;
- c->Request.CDB[5]= upper32 & 0xff;
- c->Request.CDB[6]= (start_blk >> 24) & 0xff;
- c->Request.CDB[7]= (start_blk >> 16) & 0xff;
- c->Request.CDB[8]= (start_blk >> 8) & 0xff;
- c->Request.CDB[9]= start_blk & 0xff;
- c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
- c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
- c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
- c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
- c->Request.CDB[14] = c->Request.CDB[15] = 0;
- }
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- c->Request.CDBLen = scsi_req(creq)->cmd_len;
- memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
- scsi_req(creq)->sense = c->err_info->SenseInfo;
- break;
- default:
- dev_warn(&h->pdev->dev, "bad request type %d\n",
- creq->cmd_flags);
- BUG();
- }
-
- spin_lock_irq(q->queue_lock);
-
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- goto queue;
-full:
- blk_stop_queue(q);
-startio:
-	/* We will already have the driver lock here so we do not need
-	 * to take it again.
- */
- start_io(h);
-}
-
-static inline unsigned long get_next_completion(ctlr_info_t *h)
-{
- return h->access.command_completed(h);
-}
-
-static inline int interrupt_pending(ctlr_info_t *h)
-{
- return h->access.intr_pending(h);
-}
-
-static inline long interrupt_not_for_us(ctlr_info_t *h)
-{
- return ((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0));
-}
-
-static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
- u32 raw_tag)
-{
- if (unlikely(tag_index >= h->nr_cmds)) {
- dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
- return 1;
- }
- return 0;
-}
-
-static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
- u32 raw_tag)
-{
- removeQ(c);
- if (likely(c->cmd_type == CMD_RWREQ))
- complete_command(h, c, 0);
- else if (c->cmd_type == CMD_IOCTL_PEND)
- complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, raw_tag);
-#endif
-}
-
-static inline u32 next_command(ctlr_info_t *h)
-{
- u32 a;
-
- if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
- return h->access.command_completed(h);
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- a = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
- return a;
-}
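-
-/* Worked example (illustrative; an addition): with max_commands == 4 the
- * controller stamps each reply tag's low bit with the current wraparound
- * parity. Once reply_pool_head walks past reply_pool[3] it snaps back to
- * reply_pool[0] and reply_pool_wraparound flips, so stale entries from
- * the previous lap carry the wrong parity and read back as FIFO_EMPTY.
- */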
-
-/* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
- u32 tag_index;
- CommandList_struct *c;
-
- tag_index = cciss_tag_to_index(raw_tag);
- if (bad_tag(h, tag_index, raw_tag))
- return next_command(h);
- c = h->cmd_pool + tag_index;
- finish_cmd(h, c, raw_tag);
- return next_command(h);
-}
-
-/* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
- CommandList_struct *c = NULL;
- __u32 busaddr_masked, tag_masked;
-
- tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
- list_for_each_entry(c, &h->cmpQ, list) {
- busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
- if (busaddr_masked == tag_masked) {
- finish_cmd(h, c, raw_tag);
- return next_command(h);
- }
- }
- bad_tag(h, h->nr_cmds + 1, raw_tag);
- return next_command(h);
-}
-
-/* Some controllers, like p400, will give us one interrupt
- * after a soft reset, even if we turned interrupts off.
- * Only need to check for this in the cciss_xxx_discard_completions
- * functions.
- */
-static int ignore_bogus_interrupt(ctlr_info_t *h)
-{
- if (likely(!reset_devices))
- return 0;
-
- if (likely(h->interrupts_enabled))
- return 0;
-
- dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
- "(known firmware bug.) Ignoring.\n");
-
- return 1;
-}
-
-static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (ignore_bogus_interrupt(h))
- return IRQ_NONE;
-
- if (interrupt_not_for_us(h))
- return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
- while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (ignore_bogus_interrupt(h))
- return IRQ_NONE;
-
- spin_lock_irqsave(&h->lock, flags);
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t do_cciss_intx(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (interrupt_not_for_us(h))
- return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
- while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY) {
- if (cciss_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
- else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
- }
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
- * check the interrupt pending register because it is not set.
- */
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- spin_lock_irqsave(&h->lock, flags);
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY) {
- if (cciss_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
- else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-/**
- * add_to_scan_list() - add controller to rescan queue
- * @h: Pointer to the controller.
- *
- * Adds the controller to the rescan queue if not already on the queue.
- *
- * returns 1 if added to the queue, 0 if skipped (could be on the
- * queue already, or the controller could be initializing or shutting
- * down).
- **/
-static int add_to_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h;
- int found = 0;
- int ret = 0;
-
- if (h->busy_initializing)
- return 0;
-
- if (!mutex_trylock(&h->busy_shutting_down))
- return 0;
-
- mutex_lock(&scan_mutex);
- list_for_each_entry(test_h, &scan_q, scan_list) {
- if (test_h == h) {
- found = 1;
- break;
- }
- }
- if (!found && !h->busy_scanning) {
- reinit_completion(&h->scan_wait);
- list_add_tail(&h->scan_list, &scan_q);
- ret = 1;
- }
- mutex_unlock(&scan_mutex);
- mutex_unlock(&h->busy_shutting_down);
-
- return ret;
-}
-
-/**
- * remove_from_scan_list() - remove controller from rescan queue
- * @h: Pointer to the controller.
- *
- * Removes the controller from the rescan queue if present. Blocks if
- * the controller is currently conducting a rescan. The controller
- * can be in one of three states:
- * 1. Doesn't need a scan
- * 2. On the scan list, but not scanning yet (we remove it)
- * 3. Busy scanning (and not on the list). In this case we want to wait for
- * the scan to complete to make sure the scanning thread for this
- * controller is completely idle.
- **/
-static void remove_from_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h, *tmp_h;
-
- mutex_lock(&scan_mutex);
- list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
- if (test_h == h) { /* state 2. */
- list_del(&h->scan_list);
- complete_all(&h->scan_wait);
- mutex_unlock(&scan_mutex);
- return;
- }
- }
- if (h->busy_scanning) { /* state 3. */
- mutex_unlock(&scan_mutex);
- wait_for_completion(&h->scan_wait);
- } else { /* state 1, nothing to do. */
- mutex_unlock(&scan_mutex);
- }
-}
-
-/**
- * scan_thread() - kernel thread used to rescan controllers
- * @data: Ignored.
- *
- * A kernel thread used to scan for drive topology changes on
- * controllers. The thread processes only one controller at a time
- * using a queue. Controllers are added to the queue using
- * add_to_scan_list() and removed from the queue either after
- * processing is done or via remove_from_scan_list().
- *
- * returns 0.
- **/
-static int scan_thread(void *data)
-{
- struct ctlr_info *h;
-
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- if (kthread_should_stop())
- break;
-
- while (1) {
- mutex_lock(&scan_mutex);
- if (list_empty(&scan_q)) {
- mutex_unlock(&scan_mutex);
- break;
- }
-
- h = list_entry(scan_q.next,
- struct ctlr_info,
- scan_list);
- list_del(&h->scan_list);
- h->busy_scanning = 1;
- mutex_unlock(&scan_mutex);
-
- rebuild_lun_table(h, 0, 0);
- complete_all(&h->scan_wait);
- mutex_lock(&scan_mutex);
- h->busy_scanning = 0;
- mutex_unlock(&scan_mutex);
- }
- }
-
- return 0;
-}
-
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
- if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
- return 0;
-
- switch (c->err_info->SenseInfo[12]) {
- case STATE_CHANGED:
- dev_warn(&h->pdev->dev, "a state change "
- "detected, command retried\n");
- return 1;
- case LUN_FAILED:
- dev_warn(&h->pdev->dev, "LUN failure "
- "detected, action required\n");
- return 1;
- case REPORT_LUNS_CHANGED:
- dev_warn(&h->pdev->dev, "report LUN data changed\n");
- /*
- * Here, we could call add_to_scan_list and wake up the scan thread,
- * except that it's quite likely that we will get more than one
- * REPORT_LUNS_CHANGED condition in quick succession, which means
- * that those which occur after the first one will likely happen
- * *during* the scan_thread's rescan. And the rescan code is not
- * robust enough to restart in the middle, undoing what it has already
- * done, and it's not clear that it's even possible to do this, since
- * part of what it does is notify the block layer, which starts
-	 * doing its own i/o to read partition tables and so on, and the
- * driver doesn't have visibility to know what might need undoing.
-	 * In any event, even if possible, it is horribly complicated to get right
- * so we just don't do it for now.
- *
- * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
- */
- return 1;
- case POWER_OR_RESET:
- dev_warn(&h->pdev->dev,
- "a power on or device reset detected\n");
- return 1;
- case UNIT_ATTENTION_CLEARED:
- dev_warn(&h->pdev->dev,
- "unit attention cleared by another initiator\n");
- return 1;
- default:
- dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
- return 1;
- }
-}
-
-/*
- * We cannot read the structure directly; for portability we must use
- * the readl()/readb() I/O accessors.
- * This is for debug only.
- */
-static void print_cfg_table(ctlr_info_t *h)
-{
- int i;
- char temp_name[17];
- CfgTable_struct *tb = h->cfgtable;
-
- dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
- dev_dbg(&h->pdev->dev, "------------------------------------\n");
- for (i = 0; i < 4; i++)
- temp_name[i] = readb(&(tb->Signature[i]));
- temp_name[4] = '\0';
- dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
- dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
- readl(&(tb->SpecValence)));
- dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
- readl(&(tb->TransportSupport)));
- dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
- readl(&(tb->TransportActive)));
- dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
- readl(&(tb->HostWrite.TransportRequest)));
- dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntDelay)));
- dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntCount)));
- dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%x\n",
- readl(&(tb->CmdsOutMax)));
- dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
- readl(&(tb->BusTypes)));
- for (i = 0; i < 16; i++)
- temp_name[i] = readb(&(tb->ServerName[i]));
- temp_name[16] = '\0';
- dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
- dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
- readl(&(tb->HeartBeat)));
-}
-
-static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
-{
- int i, offset, mem_type, bar_type;
- if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
- return 0;
- offset = 0;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
- if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
- offset += 4;
- else {
- mem_type = pci_resource_flags(pdev, i) &
- PCI_BASE_ADDRESS_MEM_TYPE_MASK;
- switch (mem_type) {
- case PCI_BASE_ADDRESS_MEM_TYPE_32:
- case PCI_BASE_ADDRESS_MEM_TYPE_1M:
- offset += 4; /* 32 bit */
- break;
- case PCI_BASE_ADDRESS_MEM_TYPE_64:
- offset += 8;
- break;
- default: /* reserved in PCI 2.2 */
- dev_warn(&pdev->dev,
- "Base address is invalid\n");
- return -1;
- }
- }
- if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
- return i + 1;
- }
- return -1;
-}
-
-/* Fill in bucket_map[], given nsgs (the max number of
- * scatter gather elements supported) and bucket[],
- * which is an array of 8 integers. The bucket[] array
- * contains 8 different DMA transfer sizes (in 16
- * byte increments) which the controller uses to fetch
- * commands. This function fills in bucket_map[], which
- * maps a given number of scatter gather elements to one of
- * the 8 DMA transfer sizes. The point of it is to allow the
- * controller to only do as much DMA as needed to fetch the
- * command, with the DMA transfer size encoded in the lower
- * bits of the command address.
- */
-static void calc_bucket_map(int bucket[], int num_buckets,
- int nsgs, int *bucket_map)
-{
- int i, j, b, size;
-
- /* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
- /* Note, bucket_map must have nsgs+1 entries. */
- for (i = 0; i <= nsgs; i++) {
- /* Compute size of a command with i SG entries */
- size = i + MINIMUM_TRANSFER_BLOCKS;
- b = num_buckets; /* Assume the biggest bucket */
- /* Find the bucket that is just big enough */
-		for (j = 0; j < num_buckets; j++) {
- if (bucket[j] >= size) {
- b = j;
- break;
- }
- }
- /* for a command with i SG entries, use bucket b. */
- bucket_map[i] = b;
- }
-}
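-
-/* Usage sketch (illustrative; an addition, assuming MAXSGENTRIES == 32 so
- * the largest bucket is 36, as in cciss_enter_performant_mode() below):
- */
-static inline void example_calc_bucket_map(void)
-{
-	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, 36 };
-	int bucket_map[32 + 1];
-
-	calc_bucket_map(bft, 8, 32, bucket_map);
-	/* A command with 3 SG entries needs 3 + 4 = 7 blocks; the first
-	 * bucket >= 7 is bft[2] == 8, so bucket_map[3] == 2. */
-}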
-
-static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
-{
- int i;
-
-	/* under certain very rare conditions, this can take a while.
- * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
- * as we enter this code.) */
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- usleep_range(10000, 20000);
- }
-}
-
-static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
-{
- /* This is a bit complicated. There are 8 registers on
-	 * the controller which we write to tell it the 8 different
-	 * command sizes there may be. It's a way of
- * reducing the DMA done to fetch each command. Encoded into
- * each command's tag are 3 bits which communicate to the controller
- * which of the eight sizes that command fits within. The size of
- * each command depends on how many scatter gather entries there are.
- * Each SG entry requires 16 bytes. The eight registers are programmed
- * with the number of 16-byte blocks a command of that size requires.
- * The smallest command possible requires 5 such 16 byte blocks.
-	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
- * blocks. Note, this only extends to the SG entries contained
- * within the command block, and does not extend to chained blocks
- * of SG elements. bft[] contains the eight values we write to
- * the registers. They are not evenly distributed, but have more
- * sizes for small commands, and fewer sizes for larger commands.
- */
- __u32 trans_offset;
- int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
- /*
- * 5 = 1 s/g entry or 4k
- * 6 = 2 s/g entry or 8k
- * 8 = 4 s/g entry or 16k
- * 10 = 6 s/g entry or 24k
- */
- unsigned long register_value;
- BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
-
- h->reply_pool_wraparound = 1; /* spec: init to 1 */
-
- /* Controller spec: zero out this buffer. */
- memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
- h->reply_pool_head = h->reply_pool;
-
- trans_offset = readl(&(h->cfgtable->TransMethodOffset));
- calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
- h->blockFetchTable);
- writel(bft[0], &h->transtable->BlockFetch0);
- writel(bft[1], &h->transtable->BlockFetch1);
- writel(bft[2], &h->transtable->BlockFetch2);
- writel(bft[3], &h->transtable->BlockFetch3);
- writel(bft[4], &h->transtable->BlockFetch4);
- writel(bft[5], &h->transtable->BlockFetch5);
- writel(bft[6], &h->transtable->BlockFetch6);
- writel(bft[7], &h->transtable->BlockFetch7);
-
- /* size of controller ring buffer */
- writel(h->max_commands, &h->transtable->RepQSize);
- writel(1, &h->transtable->RepQCount);
- writel(0, &h->transtable->RepQCtrAddrLow32);
- writel(0, &h->transtable->RepQCtrAddrHigh32);
- writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
- writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant | use_short_tags,
- &(h->cfgtable->HostWrite.TransportRequest));
-
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- cciss_wait_for_mode_change_ack(h);
- register_value = readl(&(h->cfgtable->TransportActive));
- if (!(register_value & CFGTBL_Trans_Performant))
- dev_warn(&h->pdev->dev, "cciss: unable to get board into"
- " performant mode\n");
-}
-
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
-{
- __u32 trans_support;
-
- if (cciss_simple_mode)
- return;
-
- dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
- /* Attempt to put controller into performant mode if supported */
- /* Does board support performant mode? */
- trans_support = readl(&(h->cfgtable->TransportSupport));
- if (!(trans_support & PERFORMANT_MODE))
- return;
-
- dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
-	/* Performant mode demands commands on a 32-byte boundary.
-	 * pci_alloc_consistent() aligns on page boundaries already, so we
-	 * just need to check that the command size is divisible by 32.
- */
- if ((sizeof(CommandList_struct) % 32) != 0) {
-		dev_warn(&h->pdev->dev,
-			"command size %d not divisible by 32, no performant mode\n",
-			(int)sizeof(CommandList_struct));
- return;
- }
-
- /* Performant mode ring buffer and supporting data structures */
- h->reply_pool = (__u64 *)pci_alloc_consistent(
- h->pdev, h->max_commands * sizeof(__u64),
- &(h->reply_pool_dhandle));
-
- /* Need a block fetch table for performant mode */
- h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
- sizeof(__u32)), GFP_KERNEL);
-
- if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
- goto clean_up;
-
- cciss_enter_performant_mode(h,
- trans_support & CFGTBL_Trans_use_short_tags);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
-
- return;
-clean_up:
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev,
- h->max_commands * sizeof(__u64),
- h->reply_pool,
- h->reply_pool_dhandle);
- return;
-
-} /* cciss_put_controller_into_performant_mode */
-
-/* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
- */
-
-static void cciss_interrupt_mode(ctlr_info_t *h)
-{
- int ret;
-
- /* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
-
- ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
- if (ret >= 0) {
- h->intr[0] = pci_irq_vector(h->pdev, 0);
- h->intr[1] = pci_irq_vector(h->pdev, 1);
- h->intr[2] = pci_irq_vector(h->pdev, 2);
- h->intr[3] = pci_irq_vector(h->pdev, 3);
- return;
- }
-
- ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
-
-default_int_mode:
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
- return;
-}
-
-static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
-{
- int i;
- u32 subsystem_vendor_id, subsystem_device_id;
-
- subsystem_vendor_id = pdev->subsystem_vendor;
- subsystem_device_id = pdev->subsystem_device;
- *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id;
-
-	/* Stand aside for hpsa driver on request */
-	if (cciss_allow_hpsa)
-		return -ENODEV;
-
-	for (i = 0; i < ARRAY_SIZE(products); i++)
-		if (*board_id == products[i].board_id)
-			return i;
- dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
- *board_id);
- return -ENODEV;
-}
-
-static inline bool cciss_board_disabled(ctlr_info_t *h)
-{
- u16 command;
-
- (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
- return ((command & PCI_COMMAND_MEMORY) == 0);
-}
-
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
-{
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
- /* addressing mode bits already removed */
- *memory_bar = pci_resource_start(pdev, i);
- dev_dbg(&pdev->dev, "memory BAR = %lx\n",
- *memory_bar);
- return 0;
- }
- dev_warn(&pdev->dev, "no memory BAR found\n");
- return -ENODEV;
-}
-
-#define BOARD_READY 1
-#define BOARD_NOT_READY 0
-
-static int cciss_wait_for_board_state(struct pci_dev *pdev,
-	void __iomem *vaddr, int wait_for_ready)
-{
- int i, iterations;
- u32 scratchpad;
-
- if (wait_for_ready)
- iterations = CCISS_BOARD_READY_ITERATIONS;
- else
- iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
-
- for (i = 0; i < iterations; i++) {
- scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
- if (wait_for_ready) {
- if (scratchpad == CCISS_FIRMWARE_READY)
- return 0;
- } else {
- if (scratchpad != CCISS_FIRMWARE_READY)
- return 0;
- }
- msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
- }
- dev_warn(&pdev->dev, "board not ready, timed out.\n");
- return -ENODEV;
-}
-
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
- u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
-{
- *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
- *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
- *cfg_base_addr &= (u32) 0x0000ffff;
- *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
- if (*cfg_base_addr_index == -1) {
- dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
- "*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
- return -ENODEV;
- }
- return 0;
-}
-
-static int cciss_find_cfgtables(ctlr_info_t *h)
-{
- u64 cfg_offset;
- u32 cfg_base_addr;
- u64 cfg_base_addr_index;
- u32 trans_offset;
- int rc;
-
- rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
- if (rc)
- return rc;
- h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
- cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
- if (!h->cfgtable)
- return -ENOMEM;
- rc = write_driver_ver_to_cfgtable(h->cfgtable);
- if (rc)
- return rc;
- /* Find performant mode table. */
- trans_offset = readl(&h->cfgtable->TransMethodOffset);
- h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
- cfg_base_addr_index)+cfg_offset+trans_offset,
- sizeof(*h->transtable));
- if (!h->transtable)
- return -ENOMEM;
- return 0;
-}
-
-static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
-{
- h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
-
- /* Limit commands in memory limited kdump scenario. */
- if (reset_devices && h->max_commands > 32)
- h->max_commands = 32;
-
- if (h->max_commands < 16) {
- dev_warn(&h->pdev->dev, "Controller reports "
- "max supported commands of %d, an obvious lie. "
- "Using 16. Ensure that firmware is up to date.\n",
- h->max_commands);
- h->max_commands = 16;
- }
-}
-
-/* Interrogate the hardware for some limits:
- * max commands, max SG elements without chaining, and with chaining,
- * SG chain block size, etc.
- */
-static void cciss_find_board_params(ctlr_info_t *h)
-{
- cciss_get_max_perf_mode_cmds(h);
- h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
- h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
- /*
-	 * The P600 may exhibit poor performance under some workloads
- * if we use the value in the configuration table. Limit this
- * controller to MAXSGENTRIES (32) instead.
- */
- if (h->board_id == 0x3225103C)
- h->maxsgentries = MAXSGENTRIES;
- /*
-	 * Limit in-command s/g elements to 32 to save DMA-able memory.
-	 * However, the spec says if 0, use 31.
- */
- h->max_cmd_sgentries = 31;
- if (h->maxsgentries > 512) {
- h->max_cmd_sgentries = 32;
- h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
- h->maxsgentries--; /* save one for chain pointer */
- } else {
- h->maxsgentries = 31; /* default to traditional values */
- h->chainsize = 0;
- }
-}
-
-static inline bool CISS_signature_present(ctlr_info_t *h)
-{
- if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
- dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
- return false;
- }
- return true;
-}
-
-/* Need to enable prefetch in the SCSI core for 6400 in x86 */
-static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
-{
-#ifdef CONFIG_X86
- u32 prefetch;
-
- prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
- prefetch |= 0x100;
- writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
-#endif
-}
-
-/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
- * in a prefetch beyond physical memory.
- */
-static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
-{
- u32 dma_prefetch;
- __u32 dma_refetch;
-
- if (h->board_id != 0x3225103C)
- return;
- dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
- dma_prefetch |= 0x8000;
- writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
- pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
- dma_refetch |= 0x1;
- pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
-}
-
-static int cciss_pci_init(ctlr_info_t *h)
-{
- int prod_index, err;
-
- prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
- if (prod_index < 0)
- return -ENODEV;
- h->product_name = products[prod_index].product_name;
- h->access = *(products[prod_index].access);
-
- if (cciss_board_disabled(h)) {
- dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
- return -ENODEV;
- }
-
- pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
- PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
-
- err = pci_enable_device(h->pdev);
- if (err) {
- dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(h->pdev, "cciss");
- if (err) {
- dev_warn(&h->pdev->dev,
- "Cannot obtain PCI resources, aborting\n");
- return err;
- }
-
- dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
- dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
-
-	/*
-	 * If the kernel supports MSI/MSI-X we will try to enable that
-	 * functionality, else we use the IO-APIC interrupt assigned to us
-	 * by system ROM.
-	 */
- cciss_interrupt_mode(h);
- err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
- if (err)
- goto err_out_free_res;
- h->vaddr = remap_pci_mem(h->paddr, 0x250);
- if (!h->vaddr) {
- err = -ENOMEM;
- goto err_out_free_res;
- }
- err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
- if (err)
- goto err_out_free_res;
- err = cciss_find_cfgtables(h);
- if (err)
- goto err_out_free_res;
- print_cfg_table(h);
- cciss_find_board_params(h);
-
- if (!CISS_signature_present(h)) {
- err = -ENODEV;
- goto err_out_free_res;
- }
- cciss_enable_scsi_prefetch(h);
- cciss_p600_dma_prefetch_quirk(h);
- err = cciss_enter_simple_mode(h);
- if (err)
- goto err_out_free_res;
- cciss_put_controller_into_performant_mode(h);
- return 0;
-
-err_out_free_res:
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- if (h->vaddr)
- iounmap(h->vaddr);
- pci_release_regions(h->pdev);
- return err;
-}
-
-/* Function to find the first free pointer into our hba[] array
- * Returns -1 if no free entries are left.
- */
-static int alloc_cciss_hba(struct pci_dev *pdev)
-{
- int i;
-
- for (i = 0; i < MAX_CTLR; i++) {
- if (!hba[i]) {
- ctlr_info_t *h;
-
- h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if (!h)
- goto Enomem;
- hba[i] = h;
- return i;
- }
- }
- dev_warn(&pdev->dev, "This driver supports a maximum"
- " of %d controllers.\n", MAX_CTLR);
- return -1;
-Enomem:
- dev_warn(&pdev->dev, "out of memory.\n");
- return -1;
-}
-
-static void free_hba(ctlr_info_t *h)
-{
- int i;
-
- hba[h->ctlr] = NULL;
- for (i = 0; i < h->highest_lun + 1; i++)
- if (h->gendisk[i] != NULL)
- put_disk(h->gendisk[i]);
- kfree(h);
-}
-
-/* Send a message CDB to the firmware. */
-static int cciss_message(struct pci_dev *pdev, unsigned char opcode,
- unsigned char type)
-{
- typedef struct {
- CommandListHeader_struct CommandHeader;
- RequestBlock_struct Request;
- ErrDescriptor_struct ErrorDescriptor;
- } Command;
- static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
- Command *cmd;
- dma_addr_t paddr64;
- uint32_t paddr32, tag;
- void __iomem *vaddr;
- int i, err;
-
- vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
- if (vaddr == NULL)
- return -ENOMEM;
-
- /* The Inbound Post Queue only accepts 32-bit physical addresses for the
- CCISS commands, so they must be allocated from the lower 4GiB of
- memory. */
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- iounmap(vaddr);
- return -ENOMEM;
- }
-
- cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
- if (cmd == NULL) {
- iounmap(vaddr);
- return -ENOMEM;
- }
-
- /* This must fit, because of the 32-bit consistent DMA mask. Also,
- although there's no guarantee, we assume that the address is at
- least 4-byte aligned (most likely, it's page-aligned). */
- paddr32 = paddr64;
-
- cmd->CommandHeader.ReplyQueue = 0;
- cmd->CommandHeader.SGList = 0;
- cmd->CommandHeader.SGTotal = 0;
- cmd->CommandHeader.Tag.lower = paddr32;
- cmd->CommandHeader.Tag.upper = 0;
- memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
-
- cmd->Request.CDBLen = 16;
- cmd->Request.Type.Type = TYPE_MSG;
- cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
- cmd->Request.Type.Direction = XFER_NONE;
- cmd->Request.Timeout = 0; /* Don't time out */
- cmd->Request.CDB[0] = opcode;
- cmd->Request.CDB[1] = type;
- memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
-
- cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
- cmd->ErrorDescriptor.Addr.upper = 0;
- cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
-
- writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
-
- for (i = 0; i < 10; i++) {
- tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if ((tag & ~3) == paddr32)
- break;
- msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
- }
-
- iounmap(vaddr);
-
- /* we leak the DMA buffer here ... no choice since the controller could
- still complete the command. */
- if (i == 10) {
- dev_err(&pdev->dev,
- "controller message %02x:%02x timed out\n",
- opcode, type);
- return -ETIMEDOUT;
- }
-
- pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
-
- if (tag & 2) {
- dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
- opcode, type);
- return -EIO;
- }
-
- dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
- opcode, type);
- return 0;
-}
-
-#define cciss_noop(p) cciss_message(p, 3, 0)
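-
-/*
- * Illustrative sketch (hypothetical helper, not in the original driver):
- * typical use of the message interface above.  Note the tag protocol:
- * the low two bits of the tag read back from SA5_REPLY_PORT_OFFSET are
- * status bits (bit 1 = error), which is why cciss_message() masks with
- * ~3 before matching the tag against the command's bus address.
- */
-static int __maybe_unused cciss_sketch_probe_alive(struct pci_dev *pdev)
-{
-	int rc = cciss_noop(pdev);	/* opcode 3: controller no-op */
-
-	if (rc)
-		dev_warn(&pdev->dev, "controller did not answer no-op\n");
-	return rc;
-}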
-
-static int cciss_controller_hard_reset(struct pci_dev *pdev,
-	void __iomem *vaddr, u32 use_doorbell)
-{
- u16 pmcsr;
- int pos;
-
- if (use_doorbell) {
- /* For everything after the P600, the PCI power state method
- * of resetting the controller doesn't work, so we have this
- * other way using the doorbell register.
- */
- dev_info(&pdev->dev, "using doorbell to reset controller\n");
- writel(use_doorbell, vaddr + SA5_DOORBELL);
- } else { /* Try to do it the PCI power state way */
-
- /* Quoting from the Open CISS Specification: "The Power
- * Management Control/Status Register (CSR) controls the power
- * state of the device. The normal operating state is D0,
- * CSR=00h. The software off state is D3, CSR=03h. To reset
- * the controller, place the interface device in D3 then to D0,
- * this causes a secondary PCI reset which will reset the
- * controller." */
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pos == 0) {
- dev_err(&pdev->dev,
- "cciss_controller_hard_reset: "
- "PCI PM not supported\n");
- return -ENODEV;
- }
- dev_info(&pdev->dev, "using PCI PM to reset controller\n");
- /* enter the D3hot power management state */
- pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D3hot;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
- msleep(500);
-
- /* enter the D0 power management state */
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D0;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
- /*
- * The P600 requires a small delay when changing states.
- * Otherwise we may think the board did not reset and we bail.
- * This for kdump only and is particular to the P600.
- */
- msleep(500);
- }
- return 0;
-}
-
-static void init_driver_version(char *driver_version, int len)
-{
- memset(driver_version, 0, len);
- strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
-}
-
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
-{
- char *driver_version;
- int i, size = sizeof(cfgtable->driver_version);
-
- driver_version = kmalloc(size, GFP_KERNEL);
- if (!driver_version)
- return -ENOMEM;
-
- init_driver_version(driver_version, size);
- for (i = 0; i < size; i++)
- writeb(driver_version[i], &cfgtable->driver_version[i]);
- kfree(driver_version);
- return 0;
-}
-
-static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
- unsigned char *driver_ver)
-{
- int i;
-
- for (i = 0; i < sizeof(cfgtable->driver_version); i++)
- driver_ver[i] = readb(&cfgtable->driver_version[i]);
-}
-
-static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
-{
-	char *driver_ver, *old_driver_ver;
- int rc, size = sizeof(cfgtable->driver_version);
-
- old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
- if (!old_driver_ver)
- return -ENOMEM;
- driver_ver = old_driver_ver + size;
-
- /* After a reset, the 32 bytes of "driver version" in the cfgtable
- * should have been changed, otherwise we know the reset failed.
- */
- init_driver_version(old_driver_ver, size);
- read_driver_ver_from_cfgtable(cfgtable, driver_ver);
- rc = !memcmp(driver_ver, old_driver_ver, size);
- kfree(old_driver_ver);
- return rc;
-}
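-
-/*
- * A note on the mechanism above: the three helpers implement a reset
- * "canary".  The driver stamps its own version string into the config
- * table before the reset; a successful reset wipes the stamp.
- * controller_reset_failed() therefore returns 1 if the stamp survived
- * (the reset did not take), 0 if it was cleared, or -ENOMEM on
- * allocation failure.
- */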
-
-/* This does a hard reset of the controller using PCI power management
- * states or using the doorbell register. */
-static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
-{
- u64 cfg_offset;
- u32 cfg_base_addr;
- u64 cfg_base_addr_index;
- void __iomem *vaddr;
- unsigned long paddr;
- u32 misc_fw_support;
- int rc;
- CfgTable_struct __iomem *cfgtable;
- u32 use_doorbell;
- u32 board_id;
- u16 command_register;
-
-	/* For controllers as old as the P600, this is very nearly
- * the same thing as
- *
- * pci_save_state(pci_dev);
- * pci_set_power_state(pci_dev, PCI_D3hot);
- * pci_set_power_state(pci_dev, PCI_D0);
- * pci_restore_state(pci_dev);
- *
- * For controllers newer than the P600, the pci power state
- * method of resetting doesn't work so we have another way
- * using the doorbell register.
- */
-
- /* Exclude 640x boards. These are two pci devices in one slot
- * which share a battery backed cache module. One controls the
- * cache, the other accesses the cache through the one that controls
- * it. If we reset the one controlling the cache, the other will
- * likely not be happy. Just forbid resetting this conjoined mess.
- */
- cciss_lookup_board_id(pdev, &board_id);
- if (!ctlr_is_resettable(board_id)) {
- dev_warn(&pdev->dev, "Controller not resettable\n");
- return -ENODEV;
- }
-
- /* if controller is soft- but not hard resettable... */
- if (!ctlr_is_hard_resettable(board_id))
- return -ENOTSUPP; /* try soft reset later. */
-
- /* Save the PCI command register */
- pci_read_config_word(pdev, 4, &command_register);
- /* Turn the board off. This is so that later pci_restore_state()
- * won't turn the board on before the rest of config space is ready.
- */
- pci_disable_device(pdev);
- pci_save_state(pdev);
-
- /* find the first memory BAR, so we can find the cfg table */
- rc = cciss_pci_find_memory_BAR(pdev, &paddr);
- if (rc)
- return rc;
- vaddr = remap_pci_mem(paddr, 0x250);
- if (!vaddr)
- return -ENOMEM;
-
- /* find cfgtable in order to check if reset via doorbell is supported */
- rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
- if (rc)
- goto unmap_vaddr;
- cfgtable = remap_pci_mem(pci_resource_start(pdev,
- cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
- if (!cfgtable) {
- rc = -ENOMEM;
- goto unmap_vaddr;
- }
- rc = write_driver_ver_to_cfgtable(cfgtable);
- if (rc)
- goto unmap_vaddr;
-
- /* If reset via doorbell register is supported, use that.
- * There are two such methods. Favor the newest method.
- */
- misc_fw_support = readl(&cfgtable->misc_fw_support);
- use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
- if (use_doorbell) {
- use_doorbell = DOORBELL_CTLR_RESET2;
- } else {
- use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- if (use_doorbell) {
- dev_warn(&pdev->dev, "Controller claims that "
- "'Bit 2 doorbell reset' is "
- "supported, but not 'bit 5 doorbell reset'. "
- "Firmware update is recommended.\n");
- rc = -ENOTSUPP; /* use the soft reset */
- goto unmap_cfgtable;
- }
- }
-
- rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
- if (rc)
- goto unmap_cfgtable;
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_warn(&pdev->dev, "failed to enable device.\n");
- goto unmap_cfgtable;
- }
- pci_write_config_word(pdev, 4, command_register);
-
- /* Some devices (notably the HP Smart Array 5i Controller)
- need a little pause here */
- msleep(CCISS_POST_RESET_PAUSE_MSECS);
-
- /* Wait for board to become not ready, then ready. */
- dev_info(&pdev->dev, "Waiting for board to reset.\n");
- rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
- if (rc) {
- dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
- " Will try soft reset.\n");
- rc = -ENOTSUPP; /* Not expected, but try soft reset later */
- goto unmap_cfgtable;
- }
- rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
- if (rc) {
- dev_warn(&pdev->dev,
- "failed waiting for board to become ready "
- "after hard reset\n");
- goto unmap_cfgtable;
- }
-
-	rc = controller_reset_failed(cfgtable);
- if (rc < 0)
- goto unmap_cfgtable;
- if (rc) {
- dev_warn(&pdev->dev, "Unable to successfully hard reset "
- "controller. Will try soft reset.\n");
- rc = -ENOTSUPP; /* Not expected, but try soft reset later */
- } else {
- dev_info(&pdev->dev, "Board ready after hard reset.\n");
- }
-
-unmap_cfgtable:
- iounmap(cfgtable);
-
-unmap_vaddr:
- iounmap(vaddr);
- return rc;
-}
-
-static int cciss_init_reset_devices(struct pci_dev *pdev)
-{
- int rc, i;
-
- if (!reset_devices)
- return 0;
-
- /* Reset the controller with a PCI power-cycle or via doorbell */
- rc = cciss_kdump_hard_reset_controller(pdev);
-
- /* -ENOTSUPP here means we cannot reset the controller
- * but it's already (and still) up and running in
- * "performant mode". Or, it might be 640x, which can't reset
- * due to concerns about shared bbwc between 6402/6404 pair.
- */
- if (rc == -ENOTSUPP)
- return rc; /* just try to do the kdump anyhow. */
- if (rc)
- return -ENODEV;
-
- /* Now try to get the controller to respond to a no-op */
- dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
- for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
- if (cciss_noop(pdev) == 0)
- break;
- else
- dev_warn(&pdev->dev, "no-op failed%s\n",
- (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
- "; re-trying" : ""));
- msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
- }
- return 0;
-}
-
-static int cciss_allocate_cmd_pool(ctlr_info_t *h)
-{
- h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
- sizeof(unsigned long), GFP_KERNEL);
- h->cmd_pool = pci_alloc_consistent(h->pdev,
- h->nr_cmds * sizeof(CommandList_struct),
- &(h->cmd_pool_dhandle));
- h->errinfo_pool = pci_alloc_consistent(h->pdev,
- h->nr_cmds * sizeof(ErrorInfo_struct),
- &(h->errinfo_pool_dhandle));
- if ((h->cmd_pool_bits == NULL)
- || (h->cmd_pool == NULL)
- || (h->errinfo_pool == NULL)) {
- dev_err(&h->pdev->dev, "out of memory");
- return -ENOMEM;
- }
- return 0;
-}
-
-static int cciss_allocate_scatterlists(ctlr_info_t *h)
-{
- int i;
-
- /* zero it, so that on free we need not know how many were alloc'ed */
- h->scatter_list = kzalloc(h->max_commands *
- sizeof(struct scatterlist *), GFP_KERNEL);
- if (!h->scatter_list)
- return -ENOMEM;
-
- for (i = 0; i < h->nr_cmds; i++) {
- h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
- h->maxsgentries, GFP_KERNEL);
- if (h->scatter_list[i] == NULL) {
- dev_err(&h->pdev->dev, "could not allocate "
- "s/g lists\n");
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void cciss_free_scatterlists(ctlr_info_t *h)
-{
- int i;
-
- if (h->scatter_list) {
- for (i = 0; i < h->nr_cmds; i++)
- kfree(h->scatter_list[i]);
- kfree(h->scatter_list);
- }
-}
-
-static void cciss_free_cmd_pool(ctlr_info_t *h)
-{
- kfree(h->cmd_pool_bits);
- if (h->cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(CommandList_struct),
- h->cmd_pool, h->cmd_pool_dhandle);
- if (h->errinfo_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(ErrorInfo_struct),
- h->errinfo_pool, h->errinfo_pool_dhandle);
-}
-
-static int cciss_request_irq(ctlr_info_t *h,
- irqreturn_t (*msixhandler)(int, void *),
- irqreturn_t (*intxhandler)(int, void *))
-{
- if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
- if (!request_irq(h->intr[h->intr_mode], msixhandler,
- 0, h->devname, h))
- return 0;
- dev_err(&h->pdev->dev, "Unable to get msi irq %d"
- " for %s\n", h->intr[h->intr_mode],
- h->devname);
- return -1;
- }
-
- if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_SHARED, h->devname, h))
- return 0;
- dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
- return -1;
-}
-
-static int cciss_kdump_soft_reset(ctlr_info_t *h)
-{
- if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
- dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
- return -EIO;
- }
-
- dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
- if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
- dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
- return -1;
- }
-
- dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
- if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
- dev_warn(&h->pdev->dev, "Board failed to become ready "
- "after soft reset.\n");
- return -1;
- }
-
- return 0;
-}
-
-static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
-{
- int ctlr = h->ctlr;
-
- free_irq(h->intr[h->intr_mode], h);
- pci_free_irq_vectors(h->pdev);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- cciss_free_scatterlists(h);
- cciss_free_cmd_pool(h);
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
- h->reply_pool, h->reply_pool_dhandle);
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- if (h->vaddr)
- iounmap(h->vaddr);
- unregister_blkdev(h->major, h->devname);
- cciss_destroy_hba_sysfs_entry(h);
- pci_release_regions(h->pdev);
- kfree(h);
- hba[ctlr] = NULL;
-}
-
-/*
- * This is it. Find all the controllers and register them. I really hate
- * stealing all these major device numbers.
- * Returns 0 on success, negative errno on failure.
- */
-static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int i;
- int j = 0;
- int rc;
- int try_soft_reset = 0;
- int dac, return_code;
- InquiryData_struct *inq_buff;
- ctlr_info_t *h;
- unsigned long flags;
-
- /*
- * By default the cciss driver is used for all older HP Smart Array
-	 * controllers. There are module parameters that allow a user to
- * override this behavior and instead use the hpsa SCSI driver. If
- * this is the case cciss may be loaded first from the kdump initrd
- * image and cause a kernel panic. So if reset_devices is true and
- * cciss_allow_hpsa is set just bail.
- */
- if ((reset_devices) && (cciss_allow_hpsa == 1))
- return -ENODEV;
- rc = cciss_init_reset_devices(pdev);
- if (rc) {
- if (rc != -ENOTSUPP)
- return rc;
- /* If the reset fails in a particular way (it has no way to do
- * a proper hard reset, so returns -ENOTSUPP) we can try to do
- * a soft reset once we get the controller configured up to the
- * point that it can accept a command.
- */
- try_soft_reset = 1;
- rc = 0;
- }
-
-reinit_after_soft_reset:
-
- i = alloc_cciss_hba(pdev);
- if (i < 0)
- return -ENOMEM;
-
- h = hba[i];
- h->pdev = pdev;
- h->busy_initializing = 1;
- h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
- INIT_LIST_HEAD(&h->cmpQ);
- INIT_LIST_HEAD(&h->reqQ);
- mutex_init(&h->busy_shutting_down);
-
- if (cciss_pci_init(h) != 0)
- goto clean_no_release_regions;
-
- sprintf(h->devname, "cciss%d", i);
- h->ctlr = i;
-
- if (cciss_tape_cmds < 2)
- cciss_tape_cmds = 2;
- if (cciss_tape_cmds > 16)
- cciss_tape_cmds = 16;
-
- init_completion(&h->scan_wait);
-
- if (cciss_create_hba_sysfs_entry(h))
- goto clean0;
-
- /* configure PCI DMA stuff */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- dac = 1;
- else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
- dac = 0;
- else {
- dev_err(&h->pdev->dev, "no suitable DMA available\n");
- goto clean1;
- }
-
-	/*
-	 * Register with the major number, or get a dynamic major number
-	 * by passing 0 as the argument. This is done to support more
-	 * than 8 controllers.
-	 */
- if (i < MAX_CTLR_ORIG)
- h->major = COMPAQ_CISS_MAJOR + i;
- rc = register_blkdev(h->major, h->devname);
- if (rc == -EBUSY || rc == -EINVAL) {
- dev_err(&h->pdev->dev,
- "Unable to get major number %d for %s "
- "on hba %d\n", h->major, h->devname, i);
- goto clean1;
- } else {
- if (i >= MAX_CTLR_ORIG)
- h->major = rc;
- }
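-
-	/*
-	 * A note on the call above: register_blkdev() returns a
-	 * dynamically allocated major when called with major == 0, so
-	 * controllers beyond MAX_CTLR_ORIG (whose h->major is still 0
-	 * here) take their major from the return value, while the first
-	 * MAX_CTLR_ORIG keep COMPAQ_CISS_MAJOR + i.
-	 */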
-
- /* make sure the board interrupts are off */
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
- if (rc)
- goto clean2;
-
- dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
- h->devname, pdev->device, pci_name(pdev),
- h->intr[h->intr_mode], dac ? "" : " not");
-
- if (cciss_allocate_cmd_pool(h))
- goto clean4;
-
- if (cciss_allocate_scatterlists(h))
- goto clean4;
-
- h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
- h->chainsize, h->nr_cmds);
- if (!h->cmd_sg_list && h->chainsize > 0)
- goto clean4;
-
- spin_lock_init(&h->lock);
-
-	/* Initialize the pdev driver private data; have it point to h. */
- pci_set_drvdata(pdev, h);
- /* command and error info recs zeroed out before
- they are used */
- bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
-
- h->num_luns = 0;
- h->highest_lun = -1;
- for (j = 0; j < CISS_MAX_LUN; j++) {
- h->drv[j] = NULL;
- h->gendisk[j] = NULL;
- }
-
- /* At this point, the controller is ready to take commands.
- * Now, if reset_devices and the hard reset didn't work, try
- * the soft reset and see if that works.
- */
- if (try_soft_reset) {
-
- /* This is kind of gross. We may or may not get a completion
- * from the soft reset command, and if we do, then the value
- * from the fifo may or may not be valid. So, we wait 10 secs
- * after the reset throwing away any completions we get during
- * that time. Unregister the interrupt handler and register
- * fake ones to scoop up any residual completions.
- */
- spin_lock_irqsave(&h->lock, flags);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[h->intr_mode], h);
- rc = cciss_request_irq(h, cciss_msix_discard_completions,
- cciss_intx_discard_completions);
- if (rc) {
- dev_warn(&h->pdev->dev, "Failed to request_irq after "
- "soft reset.\n");
- goto clean4;
- }
-
- rc = cciss_kdump_soft_reset(h);
- if (rc) {
- dev_warn(&h->pdev->dev, "Soft reset failed.\n");
- goto clean4;
- }
-
- dev_info(&h->pdev->dev, "Board READY.\n");
- dev_info(&h->pdev->dev,
- "Waiting for stale completions to drain.\n");
- h->access.set_intr_mask(h, CCISS_INTR_ON);
- msleep(10000);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-
- rc = controller_reset_failed(h->cfgtable);
- if (rc)
- dev_info(&h->pdev->dev,
- "Soft reset appears to have failed.\n");
-
- /* since the controller's reset, we have to go back and re-init
- * everything. Easiest to just forget what we've done and do it
- * all over again.
- */
- cciss_undo_allocations_after_kdump_soft_reset(h);
- try_soft_reset = 0;
- if (rc)
- /* don't go to clean4, we already unallocated */
- return -ENODEV;
-
- goto reinit_after_soft_reset;
- }
-
- cciss_scsi_setup(h);
-
- /* Turn the interrupts on so we can service requests */
- h->access.set_intr_mask(h, CCISS_INTR_ON);
-
- /* Get the firmware version */
- inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL) {
- dev_err(&h->pdev->dev, "out of memory\n");
- goto clean4;
- }
-
- return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
- sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
- if (return_code == IO_OK) {
- h->firm_ver[0] = inq_buff->data_byte[32];
- h->firm_ver[1] = inq_buff->data_byte[33];
- h->firm_ver[2] = inq_buff->data_byte[34];
- h->firm_ver[3] = inq_buff->data_byte[35];
- } else { /* send command failed */
- dev_warn(&h->pdev->dev, "unable to determine firmware"
- " version of controller\n");
- }
- kfree(inq_buff);
-
- cciss_procinit(h);
-
- h->cciss_max_sectors = 8192;
-
- rebuild_lun_table(h, 1, 0);
- cciss_engage_scsi(h);
- h->busy_initializing = 0;
- return 0;
-
-clean4:
- cciss_free_cmd_pool(h);
- cciss_free_scatterlists(h);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- free_irq(h->intr[h->intr_mode], h);
-clean2:
- unregister_blkdev(h->major, h->devname);
-clean1:
- cciss_destroy_hba_sysfs_entry(h);
-clean0:
- pci_release_regions(pdev);
-clean_no_release_regions:
- h->busy_initializing = 0;
-
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- pci_set_drvdata(pdev, NULL);
- free_hba(h);
- return -ENODEV;
-}
-
-static void cciss_shutdown(struct pci_dev *pdev)
-{
- ctlr_info_t *h;
- char *flush_buf;
- int return_code;
-
- h = pci_get_drvdata(pdev);
- flush_buf = kzalloc(4, GFP_KERNEL);
- if (!flush_buf) {
- dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
- return;
- }
- /* write all data in the battery backed cache to disk */
- return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
- 4, 0, CTLR_LUNID, TYPE_CMD);
- kfree(flush_buf);
- if (return_code != IO_OK)
- dev_warn(&h->pdev->dev, "Error flushing cache\n");
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[h->intr_mode], h);
-}
-
-static int cciss_enter_simple_mode(struct ctlr_info *h)
-{
- u32 trans_support;
-
- trans_support = readl(&(h->cfgtable->TransportSupport));
- if (!(trans_support & SIMPLE_MODE))
- return -ENOTSUPP;
-
- h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
- writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- cciss_wait_for_mode_change_ack(h);
- print_cfg_table(h);
- if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
- return -ENODEV;
- }
- h->transMethod = CFGTBL_Trans_Simple;
- return 0;
-}
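-
-/*
- * A note on the mechanism above: the mode switch is a doorbell
- * handshake.  The driver writes the requested transport into
- * HostWrite.TransportRequest, rings CFGTBL_ChangeReq on SA5_DOORBELL,
- * waits for the controller to acknowledge the change, then verifies
- * that TransportActive actually reports simple mode before trusting it.
- */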
-
-
-static void cciss_remove_one(struct pci_dev *pdev)
-{
- ctlr_info_t *h;
- int i, j;
-
- if (pci_get_drvdata(pdev) == NULL) {
- dev_err(&pdev->dev, "Unable to remove device\n");
- return;
- }
-
- h = pci_get_drvdata(pdev);
- i = h->ctlr;
- if (hba[i] == NULL) {
- dev_err(&pdev->dev, "device appears to already be removed\n");
- return;
- }
-
- mutex_lock(&h->busy_shutting_down);
-
- remove_from_scan_list(h);
- remove_proc_entry(h->devname, proc_cciss);
- unregister_blkdev(h->major, h->devname);
-
- /* remove it from the disk list */
- for (j = 0; j < CISS_MAX_LUN; j++) {
- struct gendisk *disk = h->gendisk[j];
- if (disk) {
- struct request_queue *q = disk->queue;
-
- if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(h, j, 1);
- del_gendisk(disk);
- }
- if (q)
- blk_cleanup_queue(q);
- }
- }
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
-#endif
-
- cciss_shutdown(pdev);
-
- pci_free_irq_vectors(h->pdev);
-
- iounmap(h->transtable);
- iounmap(h->cfgtable);
- iounmap(h->vaddr);
-
- cciss_free_cmd_pool(h);
- /* Free up sg elements */
- for (j = 0; j < h->nr_cmds; j++)
- kfree(h->scatter_list[j]);
- kfree(h->scatter_list);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
- h->reply_pool, h->reply_pool_dhandle);
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- cciss_destroy_hba_sysfs_entry(h);
- mutex_unlock(&h->busy_shutting_down);
- free_hba(h);
-}
-
-static struct pci_driver cciss_pci_driver = {
- .name = "cciss",
- .probe = cciss_init_one,
- .remove = cciss_remove_one,
-	.id_table = cciss_pci_device_id,
- .shutdown = cciss_shutdown,
-};
-
-/*
- * This is it. Register the PCI driver information for the cards we
- * control; the OS will call our registered routines when it finds
- * one of our cards.
- */
-static int __init cciss_init(void)
-{
- int err;
-
- /*
- * The hardware requires that commands are aligned on a 64-bit
- * boundary. Given that we use pci_alloc_consistent() to allocate an
- * array of them, the size must be a multiple of 8 bytes.
- */
- BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
- printk(KERN_INFO DRIVER_NAME "\n");
-
- err = bus_register(&cciss_bus_type);
- if (err)
- return err;
-
- /* Start the scan thread */
- cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
- if (IS_ERR(cciss_scan_thread)) {
- err = PTR_ERR(cciss_scan_thread);
- goto err_bus_unregister;
- }
-
- /* Register for our PCI devices */
- err = pci_register_driver(&cciss_pci_driver);
- if (err)
- goto err_thread_stop;
-
- return err;
-
-err_thread_stop:
- kthread_stop(cciss_scan_thread);
-err_bus_unregister:
- bus_unregister(&cciss_bus_type);
-
- return err;
-}
-
-static void __exit cciss_cleanup(void)
-{
- int i;
-
- pci_unregister_driver(&cciss_pci_driver);
-	/* double check that all controller entries have been removed */
- for (i = 0; i < MAX_CTLR; i++) {
- if (hba[i] != NULL) {
- dev_warn(&hba[i]->pdev->dev,
- "had to remove controller\n");
- cciss_remove_one(hba[i]->pdev);
- }
- }
- kthread_stop(cciss_scan_thread);
- if (proc_cciss)
- remove_proc_entry("driver/cciss", NULL);
- bus_unregister(&cciss_bus_type);
-}
-
-module_init(cciss_init);
-module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
deleted file mode 100644
index 24b5fd75501a..000000000000
--- a/drivers/block/cciss.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#ifndef CCISS_H
-#define CCISS_H
-
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-
-#include "cciss_cmd.h"
-
-
-#define NWD_SHIFT 4
-#define MAX_PART (1 << NWD_SHIFT)
-
-#define IO_OK 0
-#define IO_ERROR 1
-#define IO_NEEDS_RETRY 3
-
-#define VENDOR_LEN 8
-#define MODEL_LEN 16
-#define REV_LEN 4
-
-struct ctlr_info;
-typedef struct ctlr_info ctlr_info_t;
-
-struct access_method {
- void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
- void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
- unsigned long (*fifo_full)(ctlr_info_t *h);
- bool (*intr_pending)(ctlr_info_t *h);
- unsigned long (*command_completed)(ctlr_info_t *h);
-};
-typedef struct _drive_info_struct
-{
- unsigned char LunID[8];
- int usage_count;
- struct request_queue *queue;
- sector_t nr_blocks;
- int block_size;
- int heads;
- int sectors;
- int cylinders;
- int raid_level; /* set to -1 to indicate that
- * the drive is not in use/configured
- */
-	int busy_configuring; /* This is set when a drive is being removed
-			       * to prevent it from being opened or its
-			       * queue from being started.
-			       */
- struct device dev;
- __u8 serial_no[16]; /* from inquiry page 0x83,
- * not necc. null terminated.
- */
- char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
- char model[MODEL_LEN + 1]; /* SCSI model string */
- char rev[REV_LEN + 1]; /* SCSI revision string */
- char device_initialized; /* indicates whether dev is initialized */
-} drive_info_struct;
-
-struct ctlr_info
-{
- int ctlr;
- char devname[8];
- char *product_name;
- char firm_ver[4]; /* Firmware version */
- struct pci_dev *pdev;
- __u32 board_id;
- void __iomem *vaddr;
- unsigned long paddr;
- int nr_cmds; /* Number of commands allowed on this controller */
- CfgTable_struct __iomem *cfgtable;
- int interrupts_enabled;
- int major;
- int max_commands;
- int commands_outstanding;
- int max_outstanding; /* Debug */
- int num_luns;
- int highest_lun;
-	int usage_count; /* number of opens of all minor devices */
- /* Need space for temp sg list
- * number of scatter/gathers supported
- * number of scatter/gathers in chained block
- */
- struct scatterlist **scatter_list;
- int maxsgentries;
- int chainsize;
- int max_cmd_sgentries;
- SGDescriptor_struct **cmd_sg_list;
-
-# define PERF_MODE_INT 0
-# define DOORBELL_INT 1
-# define SIMPLE_MODE_INT 2
-# define MEMQ_MODE_INT 3
- unsigned int intr[4];
- int intr_mode;
- int cciss_max_sectors;
- BYTE cciss_read;
- BYTE cciss_write;
- BYTE cciss_read_capacity;
-
- /* information about each logical volume */
- drive_info_struct *drv[CISS_MAX_LUN];
-
- struct access_method access;
-
- /* queue and queue Info */
- struct list_head reqQ;
- struct list_head cmpQ;
- unsigned int Qdepth;
- unsigned int maxQsinceinit;
- unsigned int maxSG;
- spinlock_t lock;
-
- /* pointers to command and error info pool */
- CommandList_struct *cmd_pool;
- dma_addr_t cmd_pool_dhandle;
- ErrorInfo_struct *errinfo_pool;
- dma_addr_t errinfo_pool_dhandle;
- unsigned long *cmd_pool_bits;
- int nr_allocs;
- int nr_frees;
- int busy_configuring;
- int busy_initializing;
- int busy_scanning;
- struct mutex busy_shutting_down;
-
- /* This element holds the zero based queue number of the last
- * queue to be started. It is used for fairness.
- */
- int next_to_run;
-
- /* Disk structures we need to pass back */
- struct gendisk *gendisk[CISS_MAX_LUN];
-#ifdef CONFIG_CISS_SCSI_TAPE
- struct cciss_scsi_adapter_data_t *scsi_ctlr;
-#endif
- unsigned char alive;
- struct list_head scan_list;
- struct completion scan_wait;
- struct device dev;
- /*
- * Performant mode tables.
- */
- u32 trans_support;
- u32 trans_offset;
- struct TransTable_struct *transtable;
- unsigned long transMethod;
-
- /*
- * Performant mode completion buffer
- */
- u64 *reply_pool;
- dma_addr_t reply_pool_dhandle;
- u64 *reply_pool_head;
- size_t reply_pool_size;
- unsigned char reply_pool_wraparound;
- u32 *blockFetchTable;
-};
-
-/* Defining the different access_methods
- *
- * Memory mapped FIFO interface (SMART 53xx cards)
- */
-#define SA5_DOORBELL 0x20
-#define SA5_REQUEST_PORT_OFFSET 0x40
-#define SA5_REPLY_INTR_MASK_OFFSET 0x34
-#define SA5_REPLY_PORT_OFFSET 0x44
-#define SA5_INTR_STATUS 0x30
-#define SA5_SCRATCHPAD_OFFSET 0xB0
-
-#define SA5_CTCFG_OFFSET 0xB4
-#define SA5_CTMEM_OFFSET 0xB8
-
-#define SA5_INTR_OFF 0x08
-#define SA5B_INTR_OFF 0x04
-#define SA5_INTR_PENDING 0x08
-#define SA5B_INTR_PENDING 0x04
-#define FIFO_EMPTY 0xffffffff
-#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
-/* Perf. mode flags */
-#define SA5_PERF_INTR_PENDING 0x04
-#define SA5_PERF_INTR_OFF 0x05
-#define SA5_OUTDB_STATUS_PERF_BIT 0x01
-#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
-#define SA5_OUTDB_CLEAR 0xA0
-#define SA5_OUTDB_STATUS 0x9C
-
-
-#define CISS_ERROR_BIT 0x02
-
-#define CCISS_INTR_ON 1
-#define CCISS_INTR_OFF 0
-
-
-/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
- * to become ready, in seconds, before giving up on it.
- * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
- * between polling the board to see if it is ready, in
- * milliseconds. CCISS_BOARD_READY_ITERATIONS is derived from
- * the above.
- */
-#define CCISS_BOARD_READY_WAIT_SECS (120)
-#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
-#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
-#define CCISS_BOARD_READY_ITERATIONS \
- ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
- CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_BOARD_NOT_READY_ITERATIONS \
- ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
- CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_POST_RESET_PAUSE_MSECS (3000)
-#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000)
-#define CCISS_POST_RESET_NOOP_RETRIES (12)
-#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
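-
-/*
- * Worked arithmetic for the macros above:
- * CCISS_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls, and
- * CCISS_BOARD_NOT_READY_ITERATIONS = (100 * 1000) / 100 = 1000 polls,
- * i.e. the board is polled every 100 ms for up to two minutes (or 100
- * seconds for the not-ready wait).
- */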
-
-/*
- Send the command to the hardware
-*/
-static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
-{
-#ifdef CCISS_DEBUG
- printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
- h->ctlr, c->busaddr);
-#endif /* CCISS_DEBUG */
- writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- h->commands_outstanding++;
- if ( h->commands_outstanding > h->max_outstanding)
- h->max_outstanding = h->commands_outstanding;
-}
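-
-/*
- * A note on the sequence above (not in the original source): the
- * readl() of the scratchpad register immediately after the writel()
- * forces the posted PCI write of the command address out to the
- * controller before the outstanding-command counters are updated.
- */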
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- h->interrupts_enabled = 0;
- writel( SA5_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x04 turns them off...
- */
-static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- h->interrupts_enabled = 0;
- writel( SA5B_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/* Performant mode intr_mask */
-static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val) { /* turn on interrupts */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else {
- h->interrupts_enabled = 0;
- writel(SA5_PERF_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(ctlr_info_t *h)
-{
-	return h->commands_outstanding >= h->max_commands;
-}
-/*
- * returns value read from hardware.
- * returns FIFO_EMPTY if there is nothing to read
- */
-static unsigned long SA5_completed(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
-
-	if (register_value != FIFO_EMPTY) {
-		h->commands_outstanding--;
-#ifdef CCISS_DEBUG
-		printk("cciss: Read %lx back from board\n", register_value);
-#endif /* CCISS_DEBUG */
-	}
-#ifdef CCISS_DEBUG
-	else
-		printk("cciss: FIFO Empty read\n");
-#endif /* CCISS_DEBUG */
-	return register_value;
-}
-
-/* Performant mode command completed */
-static unsigned long SA5_performant_completed(ctlr_info_t *h)
-{
- unsigned long register_value = FIFO_EMPTY;
-
- /* flush the controller write of the reply queue by reading
- * outbound doorbell status register.
- */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- /* msi auto clears the interrupt pending bit. */
- if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
- writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
- /* Do a read in order to flush the write to the controller
- * (as per spec.)
- */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- }
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- register_value = *(h->reply_pool_head);
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- register_value = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
-
- return register_value;
-}
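-
-/*
- * Illustrative sketch (hypothetical, not in the original source): the
- * reply queue consumed above is a ring with a "wrap color" in bit 0.
- * An entry is new exactly when its low bit matches the consumer's
- * current color; each time the head wraps, the consumer flips the color
- * it expects.  Stand-alone model of the same convention:
- */
-static inline int sketch_reply_ring_pop(u64 *ring, unsigned int size,
-					unsigned int *head, u8 *wrap, u64 *out)
-{
-	if ((ring[*head] & 1) != *wrap)
-		return 0;		/* no new entry */
-	*out = ring[*head];
-	if (++(*head) == size) {	/* wrapped around: flip color */
-		*head = 0;
-		*wrap ^= 1;
-	}
-	return 1;
-}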
-/*
- * Returns true if an interrupt is pending..
- */
-static bool SA5_intr_pending(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
-	printk("cciss: intr_pending %lx\n", register_value);
-#endif /* CCISS_DEBUG */
-	return register_value & SA5_INTR_PENDING;
-}
-
-/*
- * Returns true if an interrupt is pending..
- */
-static bool SA5B_intr_pending(ctlr_info_t *h)
-{
-	unsigned long register_value =
-		readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
-	printk("cciss: intr_pending %lx\n", register_value);
-#endif /* CCISS_DEBUG */
-	return register_value & SA5B_INTR_PENDING;
-}
-
-static bool SA5_performant_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
-
- if (!register_value)
- return false;
-
- if (h->pdev->msi_enabled || h->pdev->msix_enabled)
- return true;
-
- /* Read outbound doorbell to flush */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- return register_value & SA5_OUTDB_STATUS_PERF_BIT;
-}
-
-static struct access_method SA5_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5_intr_pending,
- .command_completed = SA5_completed,
-};
-
-static struct access_method SA5B_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5B_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5B_intr_pending,
- .command_completed = SA5_completed,
-};
-
-static struct access_method SA5_performant_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5_performant_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5_performant_intr_pending,
- .command_completed = SA5_performant_completed,
-};
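-
-/*
- * Illustrative sketch (hypothetical helper): how the access_method
- * tables are consumed.  Callers dispatch through h->access, so one code
- * path drives SA5, SA5B and performant-mode boards alike:
- */
-static inline void sketch_submit_and_reap(ctlr_info_t *h,
-					  CommandList_struct *c)
-{
-	h->access.submit_command(h, c);	/* e.g. SA5_submit_command */
-	while (h->access.intr_pending(h))
-		if (h->access.command_completed(h) == FIFO_EMPTY)
-			break;
-}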
-
-struct board_type {
- __u32 board_id;
- char *product_name;
- struct access_method *access;
- int nr_cmds; /* Max cmds this kind of ctlr can handle. */
-};
-
-#endif /* CCISS_H */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
deleted file mode 100644
index d9be6b4d49a6..000000000000
--- a/drivers/block/cciss_cmd.h
+++ /dev/null
@@ -1,269 +0,0 @@
-#ifndef CCISS_CMD_H
-#define CCISS_CMD_H
-
-#include <linux/cciss_defs.h>
-
-/* DEFINES */
-#define CISS_VERSION "1.00"
-
-/* general boundary definitions */
-#define MAXSGENTRIES 32
-#define CCISS_SG_CHAIN 0x80000000
-#define MAXREPLYQS 256
-
-/* Unit Attentions ASC's as defined for the MSA2012sa */
-#define POWER_OR_RESET 0x29
-#define STATE_CHANGED 0x2a
-#define UNIT_ATTENTION_CLEARED 0x2f
-#define LUN_FAILED 0x3e
-#define REPORT_LUNS_CHANGED 0x3f
-
-/* Unit Attentions ASCQ's as defined for the MSA2012sa */
-
- /* These ASCQ's defined for ASC = POWER_OR_RESET */
-#define POWER_ON_RESET 0x00
-#define POWER_ON_REBOOT 0x01
-#define SCSI_BUS_RESET 0x02
-#define MSA_TARGET_RESET 0x03
-#define CONTROLLER_FAILOVER 0x04
-#define TRANSCEIVER_SE 0x05
-#define TRANSCEIVER_LVD 0x06
-
- /* These ASCQ's defined for ASC = STATE_CHANGED */
-#define RESERVATION_PREEMPTED 0x03
-#define ASYM_ACCESS_CHANGED 0x06
-#define LUN_CAPACITY_CHANGED 0x09
-
-/* config space register offsets */
-#define CFG_VENDORID 0x00
-#define CFG_DEVICEID 0x02
-#define CFG_I2OBAR 0x10
-#define CFG_MEM1BAR 0x14
-
-/* i2o space register offsets */
-#define I2O_IBDB_SET 0x20
-#define I2O_IBDB_CLEAR 0x70
-#define I2O_INT_STATUS 0x30
-#define I2O_INT_MASK 0x34
-#define I2O_IBPOST_Q 0x40
-#define I2O_OBPOST_Q 0x44
-#define I2O_DMA1_CFG 0x214
-
-/* Configuration Table */
-#define CFGTBL_ChangeReq 0x00000001l
-#define CFGTBL_AccCmds 0x00000001l
-#define DOORBELL_CTLR_RESET 0x00000004l
-#define DOORBELL_CTLR_RESET2 0x00000020l
-
-#define CFGTBL_Trans_Simple 0x00000002l
-#define CFGTBL_Trans_Performant 0x00000004l
-#define CFGTBL_Trans_use_short_tags 0x20000000l
-
-#define CFGTBL_BusType_Ultra2 0x00000001l
-#define CFGTBL_BusType_Ultra3 0x00000002l
-#define CFGTBL_BusType_Fibre1G 0x00000100l
-#define CFGTBL_BusType_Fibre2G 0x00000200l
-typedef struct _vals32
-{
- __u32 lower;
- __u32 upper;
-} vals32;
-
-typedef union _u64bit
-{
- vals32 val32;
- __u64 val;
-} u64bit;
-
-/* Type defs used in the following structs */
-#define QWORD vals32
-
-/* STRUCTURES */
-#define CISS_MAX_PHYS_LUN 1024
-/* SCSI-3 Commands */
-
-#pragma pack(1)
-
-#define CISS_INQUIRY 0x12
-/* Data returned */
-typedef struct _InquiryData_struct
-{
- BYTE data_byte[36];
-} InquiryData_struct;
-
-#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
-#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
-/* Data returned */
-typedef struct _ReportLUNdata_struct
-{
- BYTE LUNListLength[4];
- DWORD reserved;
- BYTE LUN[CISS_MAX_LUN][8];
-} ReportLunData_struct;
-
-#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
-typedef struct _ReadCapdata_struct
-{
- BYTE total_size[4]; /* Total size in blocks */
- BYTE block_size[4]; /* Size of blocks in bytes */
-} ReadCapdata_struct;
-
-#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
-
-/* service action to differentiate a 16 byte read capacity from
- other commands that use the 0x9e SCSI op code */
-
-#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10
-
-typedef struct _ReadCapdata_struct_16
-{
- BYTE total_size[8]; /* Total size in blocks */
- BYTE block_size[4]; /* Size of blocks in bytes */
- BYTE prot_en:1; /* protection enable bit */
- BYTE rto_en:1; /* reference tag own enable bit */
- BYTE reserved:6; /* reserved bits */
- BYTE reserved2[18]; /* reserved bytes per spec */
-} ReadCapdata_struct_16;
-
-/* Define the supported read/write commands for cciss based controllers */
-
-#define CCISS_READ_10 0x28 /* Read(10) */
-#define CCISS_WRITE_10 0x2a /* Write(10) */
-#define CCISS_READ_16 0x88 /* Read(16) */
-#define CCISS_WRITE_16 0x8a /* Write(16) */
-
-/* Define the CDB lengths supported by cciss based controllers */
-
-#define CDB_LEN10 10
-#define CDB_LEN16 16
-
-/* BMIC commands */
-#define BMIC_READ 0x26
-#define BMIC_WRITE 0x27
-#define BMIC_CACHE_FLUSH 0xc2
-#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-#define CCISS_RESET_TYPE_CONTROLLER 0x00
-#define CCISS_RESET_TYPE_BUS 0x01
-#define CCISS_RESET_TYPE_TARGET 0x03
-#define CCISS_RESET_TYPE_LUN 0x04
-#define CCISS_NOOP_MSG 0x03
-
-/* Command List Structure */
-#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
-
-typedef struct _CommandListHeader_struct {
- BYTE ReplyQueue;
- BYTE SGList;
- HWORD SGTotal;
- QWORD Tag;
- LUNAddr_struct LUN;
-} CommandListHeader_struct;
-typedef struct _ErrDescriptor_struct {
- QWORD Addr;
- DWORD Len;
-} ErrDescriptor_struct;
-typedef struct _SGDescriptor_struct {
- QWORD Addr;
- DWORD Len;
- DWORD Ext;
-} SGDescriptor_struct;
-
-/* Command types */
-#define CMD_RWREQ 0x00
-#define CMD_IOCTL_PEND 0x01
-#define CMD_SCSI 0x03
-#define CMD_MSG_DONE 0x04
-#define CMD_MSG_TIMEOUT 0x05
-#define CMD_MSG_STALE 0xff
-
-/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
- * because low bits of the address are used to indicate whether
- * the tag contains an index or an address. PAD_32 and PAD_64
- * can be adjusted independently as needed for 32-bit and
- * 64-bit systems.
- */
-#define COMMANDLIST_ALIGNMENT (32)
-#define IS_64_BIT ((sizeof(long) - 4)/4)
-#define IS_32_BIT (!IS_64_BIT)
-#define PAD_32 (0)
-#define PAD_64 (4)
-#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
-#define DIRECT_LOOKUP_BIT 0x10
-#define DIRECT_LOOKUP_SHIFT 5
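-
-/*
- * Illustrative sketch (hypothetical helpers, not in the original
- * header): because commands are COMMANDLIST_ALIGNMENT (32) byte
- * aligned, the low five bits of a command's bus address are always
- * zero, so a tag can carry either the address itself or, with
- * DIRECT_LOOKUP_BIT set, a command index shifted up by
- * DIRECT_LOOKUP_SHIFT:
- */
-static inline __u32 sketch_make_index_tag(long cmdindex)
-{
-	return (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
-}
-
-static inline long sketch_index_from_tag(__u32 tag)
-{
-	return tag >> DIRECT_LOOKUP_SHIFT;
-}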
-
-typedef struct _CommandList_struct {
- CommandListHeader_struct Header;
- RequestBlock_struct Request;
- ErrDescriptor_struct ErrDesc;
- SGDescriptor_struct SG[MAXSGENTRIES];
- /* information associated with the command */
- __u32 busaddr; /* physical address of this record */
- ErrorInfo_struct * err_info; /* pointer to the allocated mem */
- int ctlr;
- int cmd_type;
- long cmdindex;
- struct list_head list;
- struct request * rq;
- struct completion *waiting;
- int retry_count;
- void * scsi_cmd;
- char pad[PADSIZE];
-} CommandList_struct;
-
-/* Configuration Table Structure */
-typedef struct _HostWrite_struct {
- DWORD TransportRequest;
- DWORD Reserved;
- DWORD CoalIntDelay;
- DWORD CoalIntCount;
-} HostWrite_struct;
-
-typedef struct _CfgTable_struct {
- BYTE Signature[4];
- DWORD SpecValence;
-#define SIMPLE_MODE 0x02
-#define PERFORMANT_MODE 0x04
-#define MEMQ_MODE 0x08
- DWORD TransportSupport;
- DWORD TransportActive;
- HostWrite_struct HostWrite;
- DWORD CmdsOutMax;
- DWORD BusTypes;
- DWORD TransMethodOffset;
- BYTE ServerName[16];
- DWORD HeartBeat;
- DWORD SCSI_Prefetch;
- DWORD MaxSGElements;
- DWORD MaxLogicalUnits;
- DWORD MaxPhysicalDrives;
- DWORD MaxPhysicalDrivesPerLogicalUnit;
- DWORD MaxPerformantModeCommands;
- u8 reserved[0x78 - 0x58];
- u32 misc_fw_support; /* offset 0x78 */
-#define MISC_FW_DOORBELL_RESET (0x02)
-#define MISC_FW_DOORBELL_RESET2 (0x10)
- u8 driver_version[32];
-} CfgTable_struct;
-
-struct TransTable_struct {
- u32 BlockFetch0;
- u32 BlockFetch1;
- u32 BlockFetch2;
- u32 BlockFetch3;
- u32 BlockFetch4;
- u32 BlockFetch5;
- u32 BlockFetch6;
- u32 BlockFetch7;
- u32 RepQSize;
- u32 RepQCount;
- u32 RepQCtrAddrLow32;
- u32 RepQCtrAddrHigh32;
- u32 RepQAddr0Low32;
- u32 RepQAddr0High32;
-};
-
-#pragma pack()
-#endif /* CCISS_CMD_H */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
deleted file mode 100644
index 01a1f7e24978..000000000000
--- a/drivers/block/cciss_scsi.c
+++ /dev/null
@@ -1,1653 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * Author: Stephen M. Cameron
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-/* Here we have code to present the driver as a scsi driver
- as it is simultaneously presented as a block driver. The
- reason for doing this is to allow access to SCSI tape drives
- through the array controller. Note in particular, neither
- physical nor logical disks are presented through the scsi layer. */
-
-#include <linux/timer.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <linux/atomic.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include "cciss_scsi.h"
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
- size_t size,
- __u8 page_code, unsigned char *scsi3addr,
- int cmd_type);
-
-static CommandList_struct *cmd_alloc(ctlr_info_t *h);
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
-
-static int cciss_scsi_write_info(struct Scsi_Host *sh,
- char *buffer, /* data buffer */
- int length); /* length of data in buffer */
-static int cciss_scsi_show_info(struct seq_file *m,
- struct Scsi_Host *sh);
-
-static int cciss_scsi_queue_command (struct Scsi_Host *h,
- struct scsi_cmnd *cmd);
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
-static int cciss_eh_abort_handler(struct scsi_cmnd *);
-
-static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
- { .name = "cciss0", .ndevices = 0 },
- { .name = "cciss1", .ndevices = 0 },
- { .name = "cciss2", .ndevices = 0 },
- { .name = "cciss3", .ndevices = 0 },
- { .name = "cciss4", .ndevices = 0 },
- { .name = "cciss5", .ndevices = 0 },
- { .name = "cciss6", .ndevices = 0 },
- { .name = "cciss7", .ndevices = 0 },
-};
-
-static struct scsi_host_template cciss_driver_template = {
- .module = THIS_MODULE,
- .name = "cciss",
- .proc_name = "cciss",
- .write_info = cciss_scsi_write_info,
- .show_info = cciss_scsi_show_info,
- .queuecommand = cciss_scsi_queue_command,
- .this_id = 7,
- .use_clustering = DISABLE_CLUSTERING,
- /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
- .eh_device_reset_handler= cciss_eh_device_reset_handler,
- .eh_abort_handler = cciss_eh_abort_handler,
-};
-
-#pragma pack(1)
-
-#define SCSI_PAD_32 8
-#define SCSI_PAD_64 8
-
-struct cciss_scsi_cmd_stack_elem_t {
- CommandList_struct cmd;
- ErrorInfo_struct Err;
- __u32 busaddr;
- int cmdindex;
- u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
-};
-
-#pragma pack()
-
-#pragma pack(1)
-struct cciss_scsi_cmd_stack_t {
- struct cciss_scsi_cmd_stack_elem_t *pool;
- struct cciss_scsi_cmd_stack_elem_t **elem;
- dma_addr_t cmd_pool_handle;
- int top;
- int nelems;
-};
-#pragma pack()
-
-struct cciss_scsi_adapter_data_t {
- struct Scsi_Host *scsi_host;
- struct cciss_scsi_cmd_stack_t cmd_stack;
- SGDescriptor_struct **cmd_sg_list;
- int registered;
-	spinlock_t lock; /* to protect ccissscsi[ctlr] */
-};
-
-#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
- &h->scsi_ctlr->lock, flags);
-#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
- &h->scsi_ctlr->lock, flags);
-
-static CommandList_struct *
-scsi_cmd_alloc(ctlr_info_t *h)
-{
-	/*
-	 * Assume only one process in here at a time; locking is done by
-	 * the caller (use h->lock). It might be better to rewrite how we
-	 * allocate scsi commands in a way that needs no locking at all.
-	 */
-
- /* take the top memory chunk off the stack and return it, if any. */
- struct cciss_scsi_cmd_stack_elem_t *c;
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- u64bit temp64;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- if (stk->top < 0)
- return NULL;
- c = stk->elem[stk->top];
- /* memset(c, 0, sizeof(*c)); */
- memset(&c->cmd, 0, sizeof(c->cmd));
- memset(&c->Err, 0, sizeof(c->Err));
- /* set physical addr of cmd and addr of scsi parameters */
- c->cmd.busaddr = c->busaddr;
- c->cmd.cmdindex = c->cmdindex;
- /* (__u32) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
-
- temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
- /* (__u64) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
- sizeof(CommandList_struct)); */
- stk->top--;
- c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
- c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
- c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->cmd.ctlr = h->ctlr;
- c->cmd.err_info = &c->Err;
-
- return (CommandList_struct *) c;
-}
-
-static void
-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
- /* assume only one process in here at a time, locking done by caller. */
- /* use h->lock */
- /* drop the free memory chunk on top of the stack. */
-
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
- stk->top++;
- if (stk->top >= stk->nelems) {
- dev_err(&h->pdev->dev,
- "scsi_cmd_free called too many times.\n");
- BUG();
- }
- stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
-}
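-
-/*
- * Illustrative usage (hypothetical, not in the original source): the
- * pool above is a LIFO stack of preallocated, DMA-coherent
- * command/error pairs.  A typical lifetime, with the locking the
- * comments above require supplied by the caller:
- *
- *	spin_lock_irqsave(&h->lock, flags);
- *	c = scsi_cmd_alloc(h);
- *	spin_unlock_irqrestore(&h->lock, flags);
- *	if (c) {
- *		... fill in c->Request, submit, wait for completion ...
- *		spin_lock_irqsave(&h->lock, flags);
- *		scsi_cmd_free(h, c);
- *		spin_unlock_irqrestore(&h->lock, flags);
- *	}
- */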
-
-static int
-scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
-{
- int i;
- struct cciss_scsi_cmd_stack_t *stk;
- size_t size;
-
- stk = &sa->cmd_stack;
- stk->nelems = cciss_tape_cmds + 2;
- sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
- h->chainsize, stk->nelems);
- if (!sa->cmd_sg_list && h->chainsize > 0)
- return -ENOMEM;
-
- size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
- /* Check alignment, see cciss_cmd.h near CommandList_struct def. */
- BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
- /* pci_alloc_consistent guarantees 32-bit DMA address will be used */
- stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
- pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
-
- if (stk->pool == NULL) {
- cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
- sa->cmd_sg_list = NULL;
- return -ENOMEM;
- }
- stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
- if (!stk->elem) {
- pci_free_consistent(h->pdev, size, stk->pool,
- stk->cmd_pool_handle);
- return -1;
- }
- for (i = 0; i < stk->nelems; i++) {
- stk->elem[i] = &stk->pool[i];
- stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
- stk->elem[i]->cmdindex = i;
- }
- stk->top = stk->nelems-1;
- return 0;
-}
-
-static void
-scsi_cmd_stack_free(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- size_t size;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-	if (stk->top != stk->nelems-1) {
-		dev_warn(&h->pdev->dev,
-			"bug: %d scsi commands are still outstanding.\n",
-			stk->nelems - 1 - stk->top);
-	}
- size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
- pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
- stk->pool = NULL;
- cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
- kfree(stk->elem);
- stk->elem = NULL;
-}
-
-#if 0
-static void
-print_cmd(CommandList_struct *cp)
-{
- printk("queue:%d\n", cp->Header.ReplyQueue);
- printk("sglist:%d\n", cp->Header.SGList);
- printk("sgtot:%d\n", cp->Header.SGTotal);
- printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
- cp->Header.Tag.lower);
- printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
- printk("CDBLen:%d\n", cp->Request.CDBLen);
- printk("Type:%d\n",cp->Request.Type.Type);
- printk("Attr:%d\n",cp->Request.Type.Attribute);
- printk(" Dir:%d\n",cp->Request.Type.Direction);
- printk("Timeout:%d\n",cp->Request.Timeout);
- printk("CDB: %16ph\n", cp->Request.CDB);
- printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
- cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
- cp->ErrDesc.Len);
- printk("sgs..........Errorinfo:\n");
- printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
- printk("senselen:%d\n", cp->err_info->SenseLen);
- printk("cmd status:%d\n", cp->err_info->CommandStatus);
- printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
- printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
- printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
- printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-}
-#endif
-
-static int
-find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
-{
- /* finds an unused bus, target, lun for a new device */
- /* assumes h->scsi_ctlr->lock is held */
- int i, found=0;
- unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
-
- memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
-
- target_taken[SELF_SCSI_ID] = 1;
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
- target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
-
- for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
- if (!target_taken[i]) {
- *bus = 0; *target=i; *lun = 0; found=1;
- break;
- }
- }
- return (!found);
-}
-struct scsi2map {
- char scsi3addr[8];
- int bus, target, lun;
-};
-
-static int
-cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
- struct cciss_scsi_dev_t *device,
- struct scsi2map *added, int *nadded)
-{
- /* assumes h->scsi_ctlr->lock is held */
- int n = ccissscsi[h->ctlr].ndevices;
- struct cciss_scsi_dev_t *sd;
- int i, bus, target, lun;
- unsigned char addr1[8], addr2[8];
-
- if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
- dev_warn(&h->pdev->dev, "Too many devices, "
- "some will be inaccessible.\n");
- return -1;
- }
-
- bus = target = -1;
- lun = 0;
- /* Is this device a non-zero lun of a multi-lun device? */
- /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
- if (device->scsi3addr[4] != 0) {
- /* Search through our list and find the device which */
- /* has the same 8 byte LUN address, excepting byte 4. */
- /* Assign the same bus and target for this new LUN. */
- /* Use the logical unit number from the firmware. */
- memcpy(addr1, device->scsi3addr, 8);
- addr1[4] = 0;
- for (i = 0; i < n; i++) {
- sd = &ccissscsi[h->ctlr].dev[i];
- memcpy(addr2, sd->scsi3addr, 8);
- addr2[4] = 0;
- /* differ only in byte 4? */
- if (memcmp(addr1, addr2, 8) == 0) {
- bus = sd->bus;
- target = sd->target;
- lun = device->scsi3addr[4];
- break;
- }
- }
- }
-
- sd = &ccissscsi[h->ctlr].dev[n];
- if (lun == 0) {
- if (find_bus_target_lun(h,
- &sd->bus, &sd->target, &sd->lun) != 0)
- return -1;
- } else {
- sd->bus = bus;
- sd->target = target;
- sd->lun = lun;
- }
- added[*nadded].bus = sd->bus;
- added[*nadded].target = sd->target;
- added[*nadded].lun = sd->lun;
- (*nadded)++;
-
- memcpy(sd->scsi3addr, device->scsi3addr, 8);
- memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
- memcpy(sd->revision, device->revision, sizeof(sd->revision));
- memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
- sd->devtype = device->devtype;
-
- ccissscsi[h->ctlr].ndevices++;
-
- /* initially (before registering with the scsi layer) we don't
- know our hostno, and we don't want to print anything the first
- time anyway (the scsi layer's inquiries will show that info) */
- if (hostno != -1)
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
- scsi_device_type(sd->devtype), hostno,
- sd->bus, sd->target, sd->lun);
- return 0;
-}
-
-static void
-cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
- struct scsi2map *removed, int *nremoved)
-{
- /* assumes h->scsi_ctlr->lock is held */
- int i;
- struct cciss_scsi_dev_t sd;
-
- if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
- sd = ccissscsi[h->ctlr].dev[entry];
- removed[*nremoved].bus = sd.bus;
- removed[*nremoved].target = sd.target;
- removed[*nremoved].lun = sd.lun;
- (*nremoved)++;
- for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
- ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
- ccissscsi[h->ctlr].ndevices--;
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
- scsi_device_type(sd.devtype), hostno,
- sd.bus, sd.target, sd.lun);
-}
-
-
-#define SCSI3ADDR_EQ(a,b) ( \
- (a)[7] == (b)[7] && \
- (a)[6] == (b)[6] && \
- (a)[5] == (b)[5] && \
- (a)[4] == (b)[4] && \
- (a)[3] == (b)[3] && \
- (a)[2] == (b)[2] && \
- (a)[1] == (b)[1] && \
- (a)[0] == (b)[0])
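-/* i.e., an unrolled equivalent of memcmp((a), (b), 8) == 0 */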
-
-static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
-{
- /* called when scsi_add_device fails in order to re-adjust */
- /* ccissscsi[] to match the mid layer's view. */
- unsigned long flags;
- int i, j;
- CPQ_TAPE_LOCK(h, flags);
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- if (memcmp(scsi3addr,
- ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
- for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
- ccissscsi[h->ctlr].dev[j] =
- ccissscsi[h->ctlr].dev[j+1];
- ccissscsi[h->ctlr].ndevices--;
- break;
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
- struct cciss_scsi_dev_t *dev2)
-{
- return dev1->devtype == dev2->devtype &&
- memcmp(dev1->scsi3addr, dev2->scsi3addr,
- sizeof(dev1->scsi3addr)) == 0 &&
- memcmp(dev1->device_id, dev2->device_id,
- sizeof(dev1->device_id)) == 0 &&
- memcmp(dev1->vendor, dev2->vendor,
- sizeof(dev1->vendor)) == 0 &&
- memcmp(dev1->model, dev2->model,
- sizeof(dev1->model)) == 0 &&
- memcmp(dev1->revision, dev2->revision,
- sizeof(dev1->revision)) == 0;
-}
-
-static int
-adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
- struct cciss_scsi_dev_t sd[], int nsds)
-{
- /* sd contains scsi3 addresses and devtypes, but
- bus, target and lun are not filled in. This function
- takes what's in sd to be the current list and adjusts
- ccissscsi[] to be in line with what's in sd. */
-
- int i,j, found, changes=0;
- struct cciss_scsi_dev_t *csd;
- unsigned long flags;
- struct scsi2map *added, *removed;
- int nadded, nremoved;
- struct Scsi_Host *sh = NULL;
-
- added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
- removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
-
- if (!added || !removed) {
- dev_warn(&h->pdev->dev,
- "Out of memory in adjust_cciss_scsi_table\n");
- goto free_and_out;
- }
-
- CPQ_TAPE_LOCK(h, flags);
-
- if (hostno != -1) /* if it's not the first time... */
- sh = h->scsi_ctlr->scsi_host;
-
- /* find any devices in ccissscsi[] that are not in
- sd[] and remove them from ccissscsi[] */
-
- i = 0;
- nremoved = 0;
- nadded = 0;
- while (i < ccissscsi[h->ctlr].ndevices) {
- csd = &ccissscsi[h->ctlr].dev[i];
- found=0;
- for (j=0;j<nsds;j++) {
- if (SCSI3ADDR_EQ(sd[j].scsi3addr,
- csd->scsi3addr)) {
- if (device_is_the_same(&sd[j], csd))
- found=2;
- else
- found=1;
- break;
- }
- }
-
- if (found == 0) { /* device no longer present. */
- changes++;
- cciss_scsi_remove_entry(h, hostno, i,
- removed, &nremoved);
- /* remove ^^^, hence i not incremented */
- } else if (found == 1) { /* device is different in some way */
- changes++;
- dev_info(&h->pdev->dev,
- "device c%db%dt%dl%d has changed.\n",
- hostno, csd->bus, csd->target, csd->lun);
- cciss_scsi_remove_entry(h, hostno, i,
- removed, &nremoved);
- /* remove ^^^, hence i not incremented */
- if (cciss_scsi_add_entry(h, hostno, &sd[j],
- added, &nadded) != 0)
- /* we just removed one, so add can't fail. */
- BUG();
- csd->devtype = sd[j].devtype;
- memcpy(csd->device_id, sd[j].device_id,
- sizeof(csd->device_id));
- memcpy(csd->vendor, sd[j].vendor,
- sizeof(csd->vendor));
- memcpy(csd->model, sd[j].model,
- sizeof(csd->model));
- memcpy(csd->revision, sd[j].revision,
- sizeof(csd->revision));
- } else /* device is same as it ever was, */
- i++; /* so just move along. */
- }
-
- /* Now, make sure every device listed in sd[] is also
- listed in ccissscsi[], adding them if they aren't found */
-
- for (i=0;i<nsds;i++) {
- found=0;
- for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
- csd = &ccissscsi[h->ctlr].dev[j];
- if (SCSI3ADDR_EQ(sd[i].scsi3addr,
- csd->scsi3addr)) {
- if (device_is_the_same(&sd[i], csd))
- found=2; /* found device */
- else
- found=1; /* found a bug. */
- break;
- }
- }
- if (!found) {
- changes++;
- if (cciss_scsi_add_entry(h, hostno, &sd[i],
- added, &nadded) != 0)
- break;
- } else if (found == 1) {
- /* should never happen... */
- changes++;
- dev_warn(&h->pdev->dev,
- "device unexpectedly changed\n");
- /* but if it does happen, we just ignore that device */
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
-
- /* Don't notify the scsi mid layer of any changes the first time */
- /* through (or if there are no changes); scsi_scan_host will do */
- /* it later, on the first pass. */
- if (hostno == -1 || !changes)
- goto free_and_out;
-
- /* Notify scsi mid layer of any removed devices */
- for (i = 0; i < nremoved; i++) {
- struct scsi_device *sdev =
- scsi_device_lookup(sh, removed[i].bus,
- removed[i].target, removed[i].lun);
- if (sdev != NULL) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else {
- /* We don't expect to get here. */
- /* future cmds to this device will get selection */
- /* timeout as if the device was gone. */
- dev_warn(&h->pdev->dev, "didn't find "
- "c%db%dt%dl%d\n for removal.",
- hostno, removed[i].bus,
- removed[i].target, removed[i].lun);
- }
- }
-
- /* Notify scsi mid layer of any added devices */
- for (i = 0; i < nadded; i++) {
- int rc;
- rc = scsi_add_device(sh, added[i].bus,
- added[i].target, added[i].lun);
- if (rc == 0)
- continue;
- dev_warn(&h->pdev->dev, "scsi_add_device "
- "c%db%dt%dl%d failed, device not added.\n",
- hostno, added[i].bus, added[i].target, added[i].lun);
- /* now we have to remove it from ccissscsi, */
- /* since it didn't get added to scsi mid layer */
- fixup_botched_add(h, added[i].scsi3addr);
- }
-
-free_and_out:
- kfree(added);
- kfree(removed);
- return 0;
-}
-
-static int
-lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
-{
- int i;
- struct cciss_scsi_dev_t *sd;
- unsigned long flags;
-
- CPQ_TAPE_LOCK(h, flags);
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- sd = &ccissscsi[h->ctlr].dev[i];
- if (sd->bus == bus &&
- sd->target == target &&
- sd->lun == lun) {
- memcpy(scsi3addr, &sd->scsi3addr[0], 8);
- CPQ_TAPE_UNLOCK(h, flags);
- return 0;
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
- return -1;
-}
-
-static void
-cciss_scsi_setup(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t * shba;
-
- ccissscsi[h->ctlr].ndevices = 0;
- shba = kmalloc(sizeof(*shba), GFP_KERNEL);
- if (shba == NULL)
- return;
- shba->scsi_host = NULL;
- spin_lock_init(&shba->lock);
- shba->registered = 0;
- if (scsi_cmd_stack_setup(h, shba) != 0) {
- kfree(shba);
- shba = NULL;
- }
- h->scsi_ctlr = shba;
- return;
-}
-
-static void complete_scsi_command(CommandList_struct *c, int timeout,
- __u32 tag)
-{
- struct scsi_cmnd *cmd;
- ctlr_info_t *h;
- ErrorInfo_struct *ei;
-
- ei = c->err_info;
-
- /* First, see if it was a message rather than a command */
- if (c->Request.Type.Type == TYPE_MSG) {
- c->cmd_type = CMD_MSG_DONE;
- return;
- }
-
- cmd = (struct scsi_cmnd *) c->scsi_cmd;
- h = hba[c->ctlr];
-
- scsi_dma_unmap(cmd);
- if (c->Header.SGTotal > h->max_cmd_sgentries)
- cciss_unmap_sg_chain_block(h, c);
-
- cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
- /* cmd->result |= (GOOD << 1); */ /* status byte */
-
- cmd->result |= (ei->ScsiStatus);
- /* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */
-
- /* copy the sense data whether we need to or not. */
-
- memcpy(cmd->sense_buffer, ei->SenseInfo,
- ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
- SCSI_SENSE_BUFFERSIZE :
- ei->SenseLen);
- scsi_set_resid(cmd, ei->ResidualCnt);
-
- if (ei->CommandStatus != 0) { /* an error has occurred */
- switch (ei->CommandStatus) {
- case CMD_TARGET_STATUS:
- /* Pass it up to the upper layers... */
- if (!ei->ScsiStatus) {
-
- /* Ordinarily, this case should never happen, but there is a bug
- in some released firmware revisions that allows it to happen
- if, for example, a 4100 backplane loses power and the tape
- drive is in it. We assume that it's a fatal error of some
- kind because we can't show that it wasn't. We will make it
- look like selection timeout since that is the most common
- reason for this to occur, and it's severe enough. */
-
- cmd->result = DID_NO_CONNECT << 16;
- }
- break;
- case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- break;
- case CMD_DATA_OVERRUN:
- dev_warn(&h->pdev->dev, "%p has"
- " completed with data overrun "
- "reported\n", c);
- break;
- case CMD_INVALID: {
- /*
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
- print_cmd(c);
- */
- /* We get CMD_INVALID if you address a non-existent tape drive instead
- of a selection timeout (no response). You will see this if you yank
- out a tape drive, then try to access it. This is kind of a shame
- because it means that any other CMD_INVALID (e.g. driver bug) will
- get interpreted as a missing target. */
- cmd->result = DID_NO_CONNECT << 16;
- }
- break;
- case CMD_PROTOCOL_ERR:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p has protocol error\n", c);
- break;
- case CMD_HARDWARE_ERR:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p had hardware error\n", c);
- break;
- case CMD_CONNECTION_LOST:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p had connection lost\n", c);
- break;
- case CMD_ABORTED:
- cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "%p was aborted\n", c);
- break;
- case CMD_ABORT_FAILED:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p reports abort failed\n", c);
- break;
- case CMD_UNSOLICITED_ABORT:
- cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "%p aborted due to an "
- "unsolicited abort\n", c);
- break;
- case CMD_TIMEOUT:
- cmd->result = DID_TIME_OUT << 16;
- dev_warn(&h->pdev->dev, "%p timedout\n", c);
- break;
- case CMD_UNABORTABLE:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "c %p command "
- "unabortable\n", c);
- break;
- default:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p returned unknown status %x\n", c,
- ei->CommandStatus);
- }
- }
- cmd->scsi_done(cmd);
- scsi_cmd_free(h, c);
-}
-
-static int
-cciss_scsi_detect(ctlr_info_t *h)
-{
- struct Scsi_Host *sh;
- int error;
-
- sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
- if (sh == NULL)
- goto fail;
- sh->io_port = 0; // good enough? FIXME,
- sh->n_io_port = 0; // I don't think we use these two...
- sh->this_id = SELF_SCSI_ID;
- sh->can_queue = cciss_tape_cmds;
- sh->sg_tablesize = h->maxsgentries;
- sh->max_cmd_len = MAX_COMMAND_SIZE;
- sh->max_sectors = h->cciss_max_sectors;
-
- ((struct cciss_scsi_adapter_data_t *)
- h->scsi_ctlr)->scsi_host = sh;
- sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[SIMPLE_MODE_INT];
- sh->unique_id = sh->irq;
- error = scsi_add_host(sh, &h->pdev->dev);
- if (error)
- goto fail_host_put;
- scsi_scan_host(sh);
- return 1;
-
- fail_host_put:
- scsi_host_put(sh);
- fail:
- return 0;
-}
-
-static void
-cciss_unmap_one(struct pci_dev *pdev,
- CommandList_struct *c,
- size_t buflen,
- int data_direction)
-{
- u64bit addr64;
-
- addr64.val32.lower = c->SG[0].Addr.lower;
- addr64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
-}
-
-static void
-cciss_map_one(struct pci_dev *pdev,
- CommandList_struct *c,
- unsigned char *buf,
- size_t buflen,
- int data_direction)
-{
- __u64 addr64;
-
- addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
- c->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- c->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- c->SG[0].Len = buflen;
- c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
- c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
-}
-
-static int
-cciss_scsi_do_simple_cmd(ctlr_info_t *h,
- CommandList_struct *c,
- unsigned char *scsi3addr,
- unsigned char *cdb,
- unsigned char cdblen,
- unsigned char *buf, int bufsize,
- int direction)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
-
- c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
- c->scsi_cmd = NULL;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
- c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
- // Fill in the request block...
-
- /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
-
- memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
- memcpy(c->Request.CDB, cdb, cdblen);
- c->Request.Timeout = 0;
- c->Request.CDBLen = cdblen;
- c->Request.Type.Type = TYPE_CMD;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = direction;
-
- /* Fill in the SG list and do dma mapping */
- cciss_map_one(h->pdev, c, (unsigned char *) buf,
- bufsize, DMA_FROM_DEVICE);
-
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
-
- /* undo the dma mapping */
- cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
- return 0;
-}
-
-static void
-cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
-{
- ErrorInfo_struct *ei;
-
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_TARGET_STATUS:
- dev_warn(&h->pdev->dev,
- "cmd %p has completed with errors\n", c);
- dev_warn(&h->pdev->dev,
- "cmd %p has SCSI Status = %x\n",
- c, ei->ScsiStatus);
- if (ei->ScsiStatus == 0)
- dev_warn(&h->pdev->dev,
- "SCSI status is abnormally zero. "
- "(probably indicates selection timeout "
- "reported incorrectly due to a known "
- "firmware bug, circa July, 2001.)\n");
- break;
- case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- dev_info(&h->pdev->dev, "UNDERRUN\n");
- break;
- case CMD_DATA_OVERRUN:
- dev_warn(&h->pdev->dev, "%p has"
- " completed with data overrun "
- "reported\n", c);
- break;
- case CMD_INVALID: {
- /* controller unfortunately reports SCSI passthru's */
- /* to non-existent targets as invalid commands. */
- dev_warn(&h->pdev->dev,
- "%p is reported invalid (probably means "
- "target device no longer present)\n", c);
- /*
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
- print_cmd(c);
- */
- }
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
- break;
- case CMD_HARDWARE_ERR:
- /* cmd->result = DID_ERROR << 16; */
- dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "%p was aborted\n", c);
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev,
- "%p reports abort failed\n", c);
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev,
- "%p aborted due to an unsolicited abort\n", c);
- break;
- case CMD_TIMEOUT:
- dev_warn(&h->pdev->dev, "%p timedout\n", c);
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev,
- "%p unabortable\n", c);
- break;
- default:
- dev_warn(&h->pdev->dev,
- "%p returned unknown status %x\n",
- c, ei->CommandStatus);
- }
-}
-
-static int
-cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
- unsigned char page, unsigned char *buf,
- unsigned char bufsize)
-{
- int rc;
- CommandList_struct *c;
- char cdb[6];
- ErrorInfo_struct *ei;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (c == NULL) { /* trouble... */
- printk("cmd_alloc returned NULL!\n");
- return -1;
- }
-
- ei = c->err_info;
-
- cdb[0] = CISS_INQUIRY;
- cdb[1] = (page != 0);
- cdb[2] = page;
- cdb[3] = 0;
- cdb[4] = bufsize;
- cdb[5] = 0;
- rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
- 6, buf, bufsize, XFER_READ);
-
- if (rc != 0) return rc; /* something went wrong */
-
- if (ei->CommandStatus != 0 &&
- ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(h, c);
- rc = -1;
- }
- spin_lock_irqsave(&h->lock, flags);
- scsi_cmd_free(h, c);
- spin_unlock_irqrestore(&h->lock, flags);
- return rc;
-}
-
-/* Get the device id from inquiry page 0x83 */
-static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
- unsigned char *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (buflen > 16)
- buflen = 16;
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -1;
- rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
- if (rc == 0)
- memcpy(device_id, &buf[8], buflen);
- kfree(buf);
- return rc != 0;
-}
-
-static int
-cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
- ReportLunData_struct *buf, int bufsize)
-{
- int rc;
- CommandList_struct *c;
- unsigned char cdb[12];
- unsigned char scsi3addr[8];
- ErrorInfo_struct *ei;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
- if (c == NULL) { /* trouble... */
- printk("cmd_alloc returned NULL!\n");
- return -1;
- }
-
- memset(&scsi3addr[0], 0, 8); /* address the controller */
- cdb[0] = CISS_REPORT_PHYS;
- cdb[1] = 0;
- cdb[2] = 0;
- cdb[3] = 0;
- cdb[4] = 0;
- cdb[5] = 0;
- cdb[6] = (bufsize >> 24) & 0xFF; //MSB
- cdb[7] = (bufsize >> 16) & 0xFF;
- cdb[8] = (bufsize >> 8) & 0xFF;
- cdb[9] = bufsize & 0xFF;
- cdb[10] = 0;
- cdb[11] = 0;
-
- rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
- cdb, 12,
- (unsigned char *) buf,
- bufsize, XFER_READ);
-
- if (rc != 0) return rc; /* something went wrong */
-
- ei = c->err_info;
- if (ei->CommandStatus != 0 &&
- ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(h, c);
- rc = -1;
- }
- spin_lock_irqsave(&h->lock, flags);
- scsi_cmd_free(h, c);
- spin_unlock_irqrestore(&h->lock, flags);
- return rc;
-}
-
-static void
-cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
-{
- /* the idea here is we could get notified from /proc
- that some devices have changed, so we do a report
- physical luns cmd, and adjust our list of devices
- accordingly. (We can't rely on the scsi mid layer just
- doing inquiries, because the "busses" that the scsi
- mid layer probes are totally fabricated by this driver,
- so new devices wouldn't show up.)
-
- the scsi3addr's of devices won't change so long as the
- adapter is not reset. That means we can rescan and
- tell which devices we already know about, vs. new
- devices, vs. disappearing devices.
-
- Also, if you yank out a tape drive, then put a disk
- in its place (say, a configured volume from another
- array controller, for instance), _don't_ poke this driver
- (it will think it's still a tape), but _do_ poke the scsi
- mid layer so it does an inquiry... the scsi mid layer
- will see the physical disk. This would be bad. Need to
- think about how to prevent that. One idea would be to
- snoop all scsi responses, and if an inquiry response comes
- back that reports a disk, chuck it and return selection
- timeout instead and adjust our table... Not sure I like
- that, though.
-
- */
-#define OBDR_TAPE_INQ_SIZE 49
-#define OBDR_TAPE_SIG "$DR-10"
- ReportLunData_struct *ld_buff;
- unsigned char *inq_buff;
- unsigned char scsi3addr[8];
- __u32 num_luns=0;
- unsigned char *ch;
- struct cciss_scsi_dev_t *currentsd, *this_device;
- int ncurrent=0;
- int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
- int i;
-
- ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
- inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
- currentsd = kzalloc(sizeof(*currentsd) *
- (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
- if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- goto out;
- }
- this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
- if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
- ch = &ld_buff->LUNListLength[0];
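- /* LUNListLength is big-endian on the wire; assemble it by hand
- (the equivalent of get_unaligned_be32()) and divide by the
- 8-byte size of each LUN entry. */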
- num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
- if (num_luns > CISS_MAX_PHYS_LUN) {
- printk(KERN_WARNING
- "cciss: Maximum physical LUNs (%d) exceeded. "
- "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
- num_luns - CISS_MAX_PHYS_LUN);
- num_luns = CISS_MAX_PHYS_LUN;
- }
- }
- else {
- printk(KERN_ERR "cciss: Report physical LUNs failed.\n");
- goto out;
- }
-
-
- /* adjust our table of devices */
- for (i = 0; i < num_luns; i++) {
- /* for each physical lun, do an inquiry */
- if (ld_buff->LUN[i][3] & 0xC0) continue;
- memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
- memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
-
- if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
- (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
- /* Inquiry failed (msg printed already) */
- continue; /* so we will skip this device. */
-
- this_device->devtype = (inq_buff[0] & 0x1f);
- this_device->bus = -1;
- this_device->target = -1;
- this_device->lun = -1;
- memcpy(this_device->scsi3addr, scsi3addr, 8);
- memcpy(this_device->vendor, &inq_buff[8],
- sizeof(this_device->vendor));
- memcpy(this_device->model, &inq_buff[16],
- sizeof(this_device->model));
- memcpy(this_device->revision, &inq_buff[32],
- sizeof(this_device->revision));
- memset(this_device->device_id, 0,
- sizeof(this_device->device_id));
- cciss_scsi_get_device_id(h, scsi3addr,
- this_device->device_id, sizeof(this_device->device_id));
-
- switch (this_device->devtype) {
- case 0x05: /* CD-ROM */ {
-
- /* We don't *really* support actual CD-ROM devices,
- * just this "One Button Disaster Recovery" tape drive
- * which temporarily pretends to be a CD-ROM drive.
- * So we check that the device is really an OBDR tape
- * device by checking for "$DR-10" in bytes 43-48 of
- * the inquiry data.
- */
- char obdr_sig[7];
-
- strncpy(obdr_sig, &inq_buff[43], 6);
- obdr_sig[6] = '\0';
- if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
- /* Not OBDR device, ignore it. */
- break;
- }
- /* fall through . . . */
- case 0x01: /* sequential access, (tape) */
- case 0x08: /* medium changer */
- if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
- printk(KERN_INFO "cciss%d: %s ignored, "
- "too many devices.\n", h->ctlr,
- scsi_device_type(this_device->devtype));
- break;
- }
- currentsd[ncurrent] = *this_device;
- ncurrent++;
- break;
- default:
- break;
- }
- }
-
- adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
-out:
- kfree(inq_buff);
- kfree(ld_buff);
- kfree(currentsd);
- return;
-}
-
-static int
-is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c
-{
- int verb_len = strlen(verb);
- if (len >= verb_len && !memcmp(verb,ptr,verb_len))
- return verb_len;
- else
- return 0;
-}
-
-static int
-cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
-{
- int arg_len;
-
- if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
- cciss_update_non_disk_devices(h, hostno);
- else
- return -EINVAL;
- return length;
-}
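-
-/* A rescan is triggered from user space by writing the keyword to the
- * host's SCSI proc file, e.g. (host and instance numbers will vary):
- *
- *	echo rescan > /proc/scsi/cciss0/1
- */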
-
-static int
-cciss_scsi_write_info(struct Scsi_Host *sh,
- char *buffer, /* data buffer */
- int length) /* length of data in buffer */
-{
- ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
- if (h == NULL) /* This really shouldn't ever happen. */
- return -EINVAL;
-
- return cciss_scsi_user_command(h, sh->host_no,
- buffer, length);
-}
-
-static int
-cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh)
-{
-
- ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
- int i;
-
- if (h == NULL) /* This really shouldn't ever happen. */
- return -EINVAL;
-
- seq_printf(m, "cciss%d: SCSI host: %d\n",
- h->ctlr, sh->host_no);
-
- /* this information is needed by apps to know which cciss
- device corresponds to which scsi host number without
- having to open a scsi target device node. The device
- information is not a duplicate of /proc/scsi/scsi because
- the two may be out of sync due to scsi hotplug; rather,
- this info is something an app can use to get them
- back in sync. */
-
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- struct cciss_scsi_dev_t *sd =
- &ccissscsi[h->ctlr].dev[i];
- seq_printf(m, "c%db%dt%dl%d %02d "
- "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- sh->host_no, sd->bus, sd->target, sd->lun,
- sd->devtype,
- sd->scsi3addr[0], sd->scsi3addr[1],
- sd->scsi3addr[2], sd->scsi3addr[3],
- sd->scsi3addr[4], sd->scsi3addr[5],
- sd->scsi3addr[6], sd->scsi3addr[7]);
- }
- return 0;
-}
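-
-/* One line of the above output, with hypothetical values, would look
- * like:
- *
- *	c0b0t4l0 01 0x0000000000000401
- *
- * i.e. host/bus/target/lun, the devtype, then the 8 scsi3addr bytes. */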
-
- /* cciss_scatter_gather takes a struct scsi_cmnd (cmd), does the pci
- dma mapping, and fills in the scatter gather entries of the
- cciss command, c. */
-
-static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
- struct scsi_cmnd *cmd)
-{
- unsigned int len;
- struct scatterlist *sg;
- __u64 addr64;
- int request_nsgs, i, chained, sg_index;
- struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
- SGDescriptor_struct *curr_sg;
-
- BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
-
- chained = 0;
- sg_index = 0;
- curr_sg = c->SG;
- request_nsgs = scsi_dma_map(cmd);
- if (request_nsgs) {
- scsi_for_each_sg(cmd, sg, request_nsgs, i) {
- if (sg_index + 1 == h->max_cmd_sgentries &&
- !chained && request_nsgs - i > 1) {
- chained = 1;
- sg_index = 0;
- curr_sg = sa->cmd_sg_list[c->cmdindex];
- }
- addr64 = (__u64) sg_dma_address(sg);
- len = sg_dma_len(sg);
- curr_sg[sg_index].Addr.lower =
- (__u32) (addr64 & 0x0FFFFFFFFULL);
- curr_sg[sg_index].Addr.upper =
- (__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
- curr_sg[sg_index].Len = len;
- curr_sg[sg_index].Ext = 0;
- ++sg_index;
- }
- if (chained)
- cciss_map_sg_chain_block(h, c,
- sa->cmd_sg_list[c->cmdindex],
- (request_nsgs - (h->max_cmd_sgentries - 1)) *
- sizeof(SGDescriptor_struct));
- }
- /* track how many SG entries we are using */
- if (request_nsgs > h->maxSG)
- h->maxSG = request_nsgs;
- c->Header.SGTotal = (u16) request_nsgs + chained;
- if (request_nsgs > h->max_cmd_sgentries)
- c->Header.SGList = h->max_cmd_sgentries;
- else
- c->Header.SGList = c->Header.SGTotal;
- return;
-}
-
-
-static int
-cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
- ctlr_info_t *h;
- int rc;
- unsigned char scsi3addr[8];
- CommandList_struct *c;
- unsigned long flags;
-
- // Get the ptr to our adapter structure (hba[i]) out of cmd->host.
- // We violate cmd->host privacy here. (Is there another way?)
- h = (ctlr_info_t *) cmd->device->host->hostdata[0];
-
- rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
- cmd->device->lun, scsi3addr);
- if (rc != 0) {
- /* the scsi nexus does not match any that we presented... */
- /* pretend to mid layer that we got selection timeout */
- cmd->result = DID_NO_CONNECT << 16;
- done(cmd);
- /* we might want to think about registering controller itself
- as a processor device on the bus so sg binds to it. */
- return 0;
- }
-
- /* Ok, we have a reasonable scsi nexus, so send the cmd down, and
- see what the device thinks of it. */
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
- /* FIXME: next 3 lines are -> BAD! <- */
- cmd->result = DID_NO_CONNECT << 16;
- done(cmd);
- return 0;
- }
-
- // Fill in the command list header
-
- cmd->scsi_done = done; // save this for use by completion code
-
- /* save c in case we have to abort it */
- cmd->host_scribble = (unsigned char *) c;
-
- c->cmd_type = CMD_SCSI;
- c->scsi_cmd = cmd;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
-
- // Fill in the request block...
-
- c->Request.Timeout = 0;
- memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
- BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
- c->Request.CDBLen = cmd->cmd_len;
- memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
- c->Request.Type.Type = TYPE_CMD;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- switch (cmd->sc_data_direction) {
- case DMA_TO_DEVICE:
- c->Request.Type.Direction = XFER_WRITE;
- break;
- case DMA_FROM_DEVICE:
- c->Request.Type.Direction = XFER_READ;
- break;
- case DMA_NONE:
- c->Request.Type.Direction = XFER_NONE;
- break;
- case DMA_BIDIRECTIONAL:
- // This can happen if a buggy application does a scsi passthru
- // and sets both inlen and outlen to non-zero. (see
- // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command())
-
- c->Request.Type.Direction = XFER_RSVD;
- // This is technically wrong, and cciss controllers should
- // reject it with CMD_INVALID, which is the most correct
- // response, but non-fibre backends appear to let it
- // slide by, and give the same results as if this field
- // were set correctly. Either way is acceptable for
- // our purposes here.
-
- break;
-
- default:
- dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
- cmd->sc_data_direction);
- BUG();
- break;
- }
- cciss_scatter_gather(h, c, cmd);
- enqueue_cmd_and_start_io(h, c);
- /* the cmd'll come back via intr handler in complete_scsi_command() */
- return 0;
-}
-
-static DEF_SCSI_QCMD(cciss_scsi_queue_command)
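-
-/* DEF_SCSI_QCMD() generates cciss_scsi_queue_command() as a wrapper
- * that takes the host lock and then calls the _lck variant above. */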
-
-static void cciss_unregister_scsi(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- unsigned long flags;
-
- /* we are being forcibly unloaded, and may not refuse. */
-
- spin_lock_irqsave(&h->lock, flags);
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- /* if we weren't ever actually registered, don't unregister */
- if (sa->registered) {
- spin_unlock_irqrestore(&h->lock, flags);
- scsi_remove_host(sa->scsi_host);
- scsi_host_put(sa->scsi_host);
- spin_lock_irqsave(&h->lock, flags);
- }
-
- /* set scsi_host to NULL so our detect routine will
- find us on register */
- sa->scsi_host = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
- scsi_cmd_stack_free(h);
- kfree(sa);
-}
-
-static int cciss_engage_scsi(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- if (sa->registered) {
- dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
- spin_unlock_irqrestore(&h->lock, flags);
- return -ENXIO;
- }
- sa->registered = 1;
- spin_unlock_irqrestore(&h->lock, flags);
- cciss_update_non_disk_devices(h, -1);
- cciss_scsi_detect(h);
- return 0;
-}
-
-static void
-cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
-{
- unsigned long flags;
-
- CPQ_TAPE_LOCK(h, flags);
- seq_printf(seq,
- "Sequential access devices: %d\n\n",
- ccissscsi[h->ctlr].ndevices);
- CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int wait_for_device_to_become_ready(ctlr_info_t *h,
- unsigned char lunaddr[])
-{
- int rc;
- int count = 0;
- int waittime = HZ;
- CommandList_struct *c;
-
- c = cmd_alloc(h);
- if (!c) {
- dev_warn(&h->pdev->dev, "out of memory in "
- "wait_for_device_to_become_ready.\n");
- return IO_ERROR;
- }
-
- /* Send test unit ready until device ready, or give up. */
- while (count < 20) {
-
- /* Wait for a bit. do this first, because if we send
- * the TUR right away, the reset will just abort it.
- */
- schedule_timeout_uninterruptible(waittime);
- count++;
-
- /* Increase wait time with each try, up to a point. */
- if (waittime < (HZ * 30))
- waittime = waittime * 2;
-
- /* Send the Test Unit Ready */
- rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
- lunaddr, TYPE_CMD);
- if (rc == 0)
- rc = sendcmd_withirq_core(h, c, 0);
-
- (void) process_sendcmd_error(h, c);
-
- if (rc != 0)
- goto retry_tur;
-
- if (c->err_info->CommandStatus == CMD_SUCCESS)
- break;
-
- if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
- c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
- if (c->err_info->SenseInfo[2] == NO_SENSE)
- break;
- if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
- unsigned char asc;
- asc = c->err_info->SenseInfo[12];
- check_for_unit_attention(h, c);
- if (asc == POWER_OR_RESET)
- break;
- }
- }
-retry_tur:
- dev_warn(&h->pdev->dev, "Waiting %d secs "
- "for device to become ready.\n",
- waittime / HZ);
- rc = 1; /* device not ready. */
- }
-
- if (rc)
- dev_warn(&h->pdev->dev, "giving up on device.\n");
- else
- dev_warn(&h->pdev->dev, "device is ready.\n");
-
- cmd_free(h, c);
- return rc;
-}
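-
-/* The loop above sends at most 20 TEST UNIT READYs, doubling the wait
- * between tries from one second (HZ) up to a cap of roughly 30 seconds. */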
-
-/* Need at least one of these error handlers to keep ../scsi/hosts.c from
- * complaining. Doing a host- or bus-reset can't do anything good here.
- * Despite what it might say in scsi_error.c, there may well be commands
- * on the controller, as the cciss driver registers twice, once as a block
- * device for the logical drives, and once as a scsi device, for any tape
- * drives. So we know there are no commands out on the tape drives, but we
- * don't know there are no commands on the controller; it is likely
- * that there are, as the cciss block device is most commonly used
- * as a boot device (embedded controller on HP/Compaq systems).
-*/
-
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
- int rc;
- CommandList_struct *cmd_in_trouble;
- unsigned char lunaddr[8];
- ctlr_info_t *h;
-
- /* find the controller to which the command to be aborted was sent */
- h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (h == NULL) /* paranoia */
- return FAILED;
- dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
- /* find the command that's giving us trouble */
- cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_in_trouble == NULL) /* paranoia */
- return FAILED;
- memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
- /* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
- TYPE_MSG);
- if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
- return SUCCESS;
- dev_warn(&h->pdev->dev, "resetting device failed.\n");
- return FAILED;
-}
-
-static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
-{
- int rc;
- CommandList_struct *cmd_to_abort;
- unsigned char lunaddr[8];
- ctlr_info_t *h;
-
- /* find the controller to which the command to be aborted was sent */
- h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (h == NULL) /* paranoia */
- return FAILED;
- dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
-
- /* find the command to be aborted */
- cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_to_abort == NULL) /* paranoia */
- return FAILED;
- memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
- rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
- 0, 0, lunaddr, TYPE_MSG);
- if (rc == 0)
- return SUCCESS;
- return FAILED;
-
-}
-
-#else /* no CONFIG_CISS_SCSI_TAPE */
-
-/* If no tape support, then these become defined out of existence */
-
-#define cciss_scsi_setup(cntl_num)
-#define cciss_engage_scsi(h)
-
-#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
deleted file mode 100644
index e71d986727ca..000000000000
--- a/drivers/block/cciss_scsi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-#ifndef _CCISS_SCSI_H_
-#define _CCISS_SCSI_H_
-
-#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
-
- /* the scsi id of the adapter... */
-#define SELF_SCSI_ID 15
- /* 15 is somewhat arbitrary, since the scsi-2 bus
- that's presented by the driver to the OS is
- fabricated. The "real" scsi-3 bus the
- hardware presents is fabricated too.
- The actual, honest-to-goodness physical
- bus that the devices are attached to is not
- addressable natively, and may in fact turn
- out to be not scsi at all. */
-
-
-/*
-
-If the upper scsi layer tries to track how many commands we have
-outstanding, it will be operating under the misapprehension that it is
-the only one sending us requests. We also have the block interface,
-which is where most requests must surely come from, so the upper layer's
-notion of how many requests we have outstanding will be wrong most or
-all of the time.
-
-Note, the normal SCSI mid-layer error handling doesn't work well
-for this driver because 1) it takes the io_request_lock before
-calling error handlers and uses a local variable to store flags,
-so the io_request_lock cannot be released and interrupts enabled
- inside the error handlers, and 2) the error handlers cannot poll
-for command completion because they might get commands from the
-block half of the driver completing, and not know what to do
-with them. That's what we get for making a hybrid scsi/block
-driver, I suppose.
-
-*/
-
-struct cciss_scsi_dev_t {
- int devtype;
- int bus, target, lun; /* as presented to the OS */
- unsigned char scsi3addr[8]; /* as presented to the HW */
- unsigned char device_id[16]; /* from inquiry pg. 0x83 */
- unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
- unsigned char model[16]; /* bytes 16-31 of inquiry data */
- unsigned char revision[4]; /* bytes 32-35 of inquiry data */
-};
-
-struct cciss_scsi_hba_t {
- char *name;
- int ndevices;
-#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
- struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
-};
-
-#endif /* _CCISS_SCSI_H_ */
-#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e02c45cd3c5a..5f0eaee8c8a7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
- bio->bi_bdev = bdev->md_bdev;
+ bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 809fd245c3dc..bd97908c766f 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = device->ldev->md_bdev;
+ bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d17b6e6393c7..7e8589ce631c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -63,19 +63,15 @@
# define __must_hold(x)
#endif
-/* module parameter, defined in drbd_main.c */
-extern unsigned int minor_count;
-extern bool disable_sendpage;
-extern bool allow_oos;
-void tl_abort_disk_io(struct drbd_device *device);
-
+/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-extern int enable_faults;
-extern int fault_rate;
-extern int fault_devs;
+extern int drbd_enable_faults;
+extern int drbd_fault_rate;
#endif
-extern char usermode_helper[];
+extern unsigned int drbd_minor_count;
+extern char drbd_usermode_helper[];
+extern int drbd_proc_details;
/* This is used to stop/restart our threads.
@@ -181,8 +177,8 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
- return fault_rate &&
- (enable_faults & (1<<type)) &&
+ return drbd_fault_rate &&
+ (drbd_enable_faults & (1<<type)) &&
_drbd_insert_fault(device, type);
#else
return 0;
@@ -745,6 +741,8 @@ struct drbd_connection {
unsigned current_tle_writes; /* writes seen within this tl epoch */
unsigned long last_reconnect_jif;
+ /* empty member on older kernels without blk_start_plug() */
+ struct blk_plug receiver_plug;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread ack_receiver;
@@ -1131,7 +1129,8 @@ extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_sta
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
-void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_queue_unplug(struct drbd_device *device);
extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
@@ -1463,8 +1462,6 @@ extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);
-extern int proc_details;
-
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
@@ -1628,8 +1625,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_bdev) {
- drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+ if (!bio->bi_disk) {
+ drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e2ed28d45ce1..8cb3791898ae 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -77,41 +77,41 @@ MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
-/* allow_open_on_secondary */
-MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
- * this becomes the boot parameter drbd.minor_count */
-module_param(minor_count, uint, 0444);
-module_param(disable_sendpage, bool, 0644);
-module_param(allow_oos, bool, 0);
-module_param(proc_details, int, 0644);
+ * these become boot parameters (e.g., drbd.minor_count) */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-int enable_faults;
-int fault_rate;
-static int fault_count;
-int fault_devs;
+int drbd_enable_faults;
+int drbd_fault_rate;
+static int drbd_fault_count;
+static int drbd_fault_devs;
/* bitmap of enabled faults */
-module_param(enable_faults, int, 0664);
+module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
-module_param(fault_rate, int, 0664);
+module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
-module_param(fault_count, int, 0664);
+module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
-module_param(fault_devs, int, 0644);
+module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
-/* module parameter, defined */
-unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
-bool disable_sendpage;
-bool allow_oos;
-int proc_details; /* Detail level in proc drbd*/
-
+/* module parameters we can keep static */
+static bool drbd_allow_oos; /* allow_open_on_secondary */
+static bool drbd_disable_sendpage;
+MODULE_PARM_DESC(allow_oos, "DONT USE!");
+module_param_named(allow_oos, drbd_allow_oos, bool, 0);
+module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
+
+/* module parameters we share */
+int drbd_proc_details; /* Detail level in proc drbd*/
+module_param_named(proc_details, drbd_proc_details, int, 0644);
+/* module parameters shared with defaults */
+unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
* to run. Default is /sbin/drbdadm */
-char usermode_helper[80] = "/sbin/drbdadm";
-
-module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
+char drbd_usermode_helper[80] = "/sbin/drbdadm";
+module_param_named(minor_count, drbd_minor_count, uint, 0444);
+module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
@@ -923,7 +923,9 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
/* communicated if (agreed_features & DRBD_FF_WSAME) */
-void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
+static void
+assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
+ struct request_queue *q)
{
if (q) {
p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
@@ -1560,7 +1562,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+ if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
@@ -1932,7 +1934,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
- else if (!allow_oos)
+ else if (!drbd_allow_oos)
rv = -EMEDIUMTYPE;
}
@@ -1952,6 +1954,19 @@ static void drbd_release(struct gendisk *gd, fmode_t mode)
mutex_unlock(&drbd_main_mutex);
}
+/* need to hold resource->req_lock */
+void drbd_queue_unplug(struct drbd_device *device)
+{
+ if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
+ D_ASSERT(device, device->state.role == R_PRIMARY);
+ if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
+ drbd_queue_work_if_unqueued(
+ &first_peer_device(device)->connection->sender_work,
+ &device->unplug_work);
+ }
+ }
+}
+
static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
@@ -2008,18 +2023,14 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->unplug_work.cb = w_send_write_hint;
device->bm_io_work.w.cb = w_bitmap_io;
- init_timer(&device->resync_timer);
- init_timer(&device->md_sync_timer);
- init_timer(&device->start_resync_timer);
- init_timer(&device->request_timer);
- device->resync_timer.function = resync_timer_fn;
- device->resync_timer.data = (unsigned long) device;
- device->md_sync_timer.function = md_sync_timer_fn;
- device->md_sync_timer.data = (unsigned long) device;
- device->start_resync_timer.function = start_resync_timer_fn;
- device->start_resync_timer.data = (unsigned long) device;
- device->request_timer.function = request_timer_fn;
- device->request_timer.data = (unsigned long) device;
+ setup_timer(&device->resync_timer, resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->md_sync_timer, md_sync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->start_resync_timer, start_resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->request_timer, request_timer_fn,
+ (unsigned long)device);
init_waitqueue_head(&device->misc_wait);
init_waitqueue_head(&device->state_wait);
@@ -2131,7 +2142,7 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
struct page *page;
- const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
+ const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
int i;
/* prepare our caches and mempools */
@@ -2167,13 +2178,12 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
- drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+ drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
if (drbd_io_bio_set == NULL)
goto Enomem;
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
- BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER);
+ BIOSET_NEED_BVECS);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
@@ -2409,7 +2419,6 @@ static void drbd_cleanup(void)
destroy_workqueue(retry.wq);
drbd_genl_unregister();
- drbd_debugfs_cleanup();
idr_for_each_entry(&drbd_devices, device, i)
drbd_delete_device(device);
@@ -2420,6 +2429,8 @@ static void drbd_cleanup(void)
drbd_free_resource(resource);
}
+ drbd_debugfs_cleanup();
+
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
@@ -2972,12 +2983,12 @@ static int __init drbd_init(void)
{
int err;
- if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
- pr_err("invalid minor_count (%d)\n", minor_count);
+ if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
+ pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = DRBD_MINOR_COUNT_DEF;
+ drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
@@ -3900,12 +3911,12 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
- (fault_devs == 0 ||
- ((1 << device_to_minor(device)) & fault_devs) != 0) &&
- (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
+ (drbd_fault_devs == 0 ||
+ ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
+ (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
if (ret) {
- fault_count++;
+ drbd_fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "***Simulating %s failure\n",
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index ad0fcb43e45c..a12f77e6891e 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -344,7 +344,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
(char[60]) { }, /* address */
NULL };
char mb[14];
- char *argv[] = {usermode_helper, cmd, mb, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
@@ -359,19 +359,19 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
* write out any unsynced meta data changes now */
drbd_md_sync(device);
- drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+ drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
@@ -396,24 +396,24 @@ enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
(char[60]) { }, /* address */
NULL };
char *resource_name = connection->resource->name;
- char *argv[] = {usermode_helper, cmd, resource_name, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
int ret;
setup_khelper_env(connection, envp);
conn_md_sync(connection);
- drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
+ drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
@@ -1236,12 +1236,18 @@ static void fixup_discard_if_not_supported(struct request_queue *q)
static void decide_on_write_same_support(struct drbd_device *device,
struct request_queue *q,
- struct request_queue *b, struct o_qlim *o)
+ struct request_queue *b, struct o_qlim *o,
+ bool disable_write_same)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
bool can_do = b ? b->limits.max_write_same_sectors : true;
+ if (can_do && disable_write_same) {
+ can_do = false;
+ drbd_info(peer_device, "WRITE_SAME disabled by config\n");
+ }
+
if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
can_do = false;
drbd_info(peer_device, "peer does not support WRITE_SAME\n");
@@ -1302,6 +1308,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
struct request_queue *b = NULL;
struct disk_conf *dc;
bool discard_zeroes_if_aligned = true;
+ bool disable_write_same = false;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
@@ -1311,6 +1318,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
+ disable_write_same = dc->disable_write_same;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
@@ -1321,7 +1329,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
- decide_on_write_same_support(device, q, b, o);
+ decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
blk_queue_stack_limits(q, b);
@@ -1612,7 +1620,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
- if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+ if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
+ || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
drbd_md_sync(device);
@@ -2140,34 +2149,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
static int adm_detach(struct drbd_device *device, int force)
{
- enum drbd_state_rv retcode;
- void *buffer;
- int ret;
-
if (force) {
set_bit(FORCE_DETACH, &device->flags);
drbd_force_state(device, NS(disk, D_FAILED));
- retcode = SS_SUCCESS;
- goto out;
+ return SS_SUCCESS;
}
- drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
- buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
- if (buffer) {
- retcode = drbd_request_state(device, NS(disk, D_FAILED));
- drbd_md_put_buffer(device);
- } else /* already <= D_FAILED */
- retcode = SS_NOTHING_TO_DO;
- /* D_FAILED will transition to DISKLESS. */
- drbd_resume_io(device);
- ret = wait_event_interruptible(device->misc_wait,
- device->state.disk != D_FAILED);
- if ((int)retcode == (int)SS_IS_DISKLESS)
- retcode = SS_NOTHING_TO_DO;
- if (ret)
- retcode = ERR_INTR;
-out:
- return retcode;
+ return drbd_request_detach_interruptible(device);
}
/* Detaching the disk is a process in multiple stages. First we need to lock
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 8378142f7a55..582caeb0de86 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -127,7 +127,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_putc(seq, '=');
seq_putc(seq, '>');
for (i = 0; i < y; i++)
- seq_printf(seq, ".");
+ seq_putc(seq, '.');
seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
@@ -179,7 +179,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_printf_with_thousands_grouping(seq, dbdt);
seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
@@ -209,7 +209,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(device);
@@ -332,13 +332,13 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq, state);
- if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
+ if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
- if (proc_details >= 2)
+ if (drbd_proc_details >= 2)
seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt));
}
rcu_read_unlock();
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7e95e6380fb..796eaf347dc0 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -332,7 +332,7 @@ static void drbd_free_pages(struct drbd_device *device, struct page *page, int i
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -1100,7 +1100,10 @@ randomize:
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_lock(peer_device->device->state_mutex);
+ /* avoid a race with conn_request_state(C_DISCONNECTING) */
+ spin_lock_irq(&connection->resource->req_lock);
set_bit(STATE_SENT, &connection->flags);
+ spin_unlock_irq(&connection->resource->req_lock);
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_unlock(peer_device->device->state_mutex);
@@ -1194,6 +1197,14 @@ static int decode_header(struct drbd_connection *connection, void *header, struc
return 0;
}
+static void drbd_unplug_all_devices(struct drbd_connection *connection)
+{
+ if (current->plug == &connection->receiver_plug) {
+ blk_finish_plug(&connection->receiver_plug);
+ blk_start_plug(&connection->receiver_plug);
+ } /* else: maybe just schedule() ?? */
+}
+
static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
@@ -1209,6 +1220,36 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in
return err;
}
+static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
+{
+ void *buffer = connection->data.rbuf;
+ unsigned int size = drbd_header_size(connection);
+ int err;
+
+ err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
+ if (err != size) {
+ /* If the receive buffer is empty now, drain the backend queues
+ * as quickly as possible to reduce application latency, and let
+ * the remote TCP know what we have received so far. */
+ if (err == -EAGAIN) {
+ drbd_tcp_quickack(connection->data.socket);
+ drbd_unplug_all_devices(connection);
+ }
+ if (err > 0) {
+ buffer += err;
+ size -= err;
+ }
+ err = drbd_recv_all_warn(connection, buffer, size);
+ if (err)
+ return err;
+ }
+
+ err = decode_header(connection, connection->data.rbuf, pi);
+ connection->last_received = jiffies;
+
+ return err;
+}
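drbd_tcp_quickack() is assumed here to boil down to the TCP_QUICKACK socket option, so the peer's TCP learns immediately how much we have consumed. A userspace equivalent, for illustration only:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Userspace sketch of the assumed effect of drbd_tcp_quickack():
 * ask TCP to ack now rather than waiting for a delayed ack. */
static void tcp_quickack(int sock)
{
	int one = 1;

	setsockopt(sock, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}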
/* This is blkdev_issue_flush, but asynchronous.
* We want to submit to all component volumes in parallel,
* then wait for all completions.
@@ -1223,7 +1264,7 @@ struct one_flush_context {
struct issue_flush_context *ctx;
};
-void one_flush_endio(struct bio *bio)
+static void one_flush_endio(struct bio *bio)
{
struct one_flush_context *octx = bio->bi_private;
struct drbd_device *device = octx->device;
@@ -1265,7 +1306,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -1548,7 +1589,7 @@ next_bio:
}
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -4085,7 +4126,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
return config_unknown_volume(connection, pi);
device = peer_device->device;
- p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
if (!p_uuid) {
drbd_err(device, "kmalloc of p_uuid failed\n");
return false;
@@ -4882,8 +4923,8 @@ static void drbdd(struct drbd_connection *connection)
struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
- update_receiver_timing_details(connection, drbd_recv_header);
- if (drbd_recv_header(connection, &pi))
+ update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
+ if (drbd_recv_header_maybe_unplug(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
@@ -5375,8 +5416,11 @@ int drbd_receiver(struct drbd_thread *thi)
}
} while (h == 0);
- if (h > 0)
+ if (h > 0) {
+ blk_start_plug(&connection->receiver_plug);
drbdd(connection);
+ blk_finish_plug(&connection->receiver_plug);
+ }
conn_disconnect(connection);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f6e865b2d543..de8566e55334 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -36,14 +36,18 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
- &device->vdisk->part0);
+ struct request_queue *q = device->rq_queue;
+
+ generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ req->i.size >> 9, &device->vdisk->part0);
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_end_io_acct(bio_data_dir(req->master_bio),
+ struct request_queue *q = device->rq_queue;
+
+ generic_end_io_acct(q, bio_data_dir(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
@@ -1175,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
@@ -1275,6 +1279,57 @@ static bool may_do_writes(struct drbd_device *device)
return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
+struct drbd_plug_cb {
+ struct blk_plug_cb cb;
+ struct drbd_request *most_recent_req;
+ /* do we need more? */
+};
+
+static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
+ struct drbd_resource *resource = plug->cb.data;
+ struct drbd_request *req = plug->most_recent_req;
+
+ kfree(cb);
+ if (!req)
+ return;
+
+ spin_lock_irq(&resource->req_lock);
+ /* In case the sender did not process it yet, raise the flag to
+ * have it followed by a P_UNPLUG_REMOTE just after. */
+ req->rq_state |= RQ_UNPLUG;
+ /* but also queue a generic unplug */
+ drbd_queue_unplug(req->device);
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&resource->req_lock);
+}
+
+static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
+{
+ /* A lot of text to say
+ * return (struct drbd_plug_cb*)blk_check_plugged(); */
+ struct drbd_plug_cb *plug;
+ struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
+
+ if (cb)
+ plug = container_of(cb, struct drbd_plug_cb, cb);
+ else
+ plug = NULL;
+ return plug;
+}
+
+static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
+{
+ struct drbd_request *tmp = plug->most_recent_req;
+ /* Will be sent to some peer.
+ * Remember to tag it with UNPLUG_REMOTE on unplug */
+ kref_get(&req->kref);
+ plug->most_recent_req = req;
+ if (tmp)
+ kref_put(&tmp->kref, drbd_req_destroy);
+}
+
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
@@ -1347,6 +1402,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
no_remote = true;
}
+ if (no_remote == false) {
+ struct drbd_plug_cb *plug = drbd_check_plugged(resource);
+ if (plug)
+ drbd_update_plug(plug, req);
+ }
+
/* If it took the fast path in drbd_request_prepare, add it here.
* The slow path has added it already. */
if (list_empty(&req->req_pending_master_completion))
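The plug-callback helpers added above lean on the block layer's per-task plugging: blk_check_plugged() returns the callback already attached to the current plug for this (function, data) pair, or allocates a zeroed one, and the callback runs (and must free itself) when the plug is flushed. A reduced sketch of that idiom with hypothetical driver names:

#include <linux/blkdev.h>
#include <linux/slab.h>

/* The blk_plug_cb member must be first: blk_check_plugged() allocates
 * the whole struct and initialises it as a bare blk_plug_cb. */
struct my_plug_cb {
	struct blk_plug_cb cb;
	unsigned int batched;	/* zeroed by the block layer's kzalloc */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);

	/* A real driver would act on plug->batched here; the callback
	 * owns the allocation and frees it. */
	kfree(plug);
}

static void my_note_request(void *driver_data)
{
	struct blk_plug_cb *cb =
		blk_check_plugged(my_unplug, driver_data, sizeof(struct my_plug_cb));

	if (cb)
		container_of(cb, struct my_plug_cb, cb)->batched++;
	/* cb == NULL means no plug is active for this task. */
}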
@@ -1395,7 +1456,10 @@ void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned l
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
+ struct blk_plug plug;
struct drbd_request *req, *tmp;
+
+ blk_start_plug(&plug);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
const int rw = bio_data_dir(req->master_bio);
@@ -1413,6 +1477,7 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
@@ -1420,12 +1485,12 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *pending,
struct list_head *later)
{
- struct drbd_request *req, *tmp;
+ struct drbd_request *req;
int wake = 0;
int err;
spin_lock_irq(&device->al_lock);
- list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -ENOBUFS)
break;
@@ -1442,17 +1507,20 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
return !list_empty(pending);
}
-void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
+static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
- struct drbd_request *req, *tmp;
+ struct blk_plug plug;
+ struct drbd_request *req;
- list_for_each_entry_safe(req, tmp, pending, tl_requests) {
+ blk_start_plug(&plug);
+ while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
atomic_dec(&device->ap_actlog_cnt);
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
void do_submit(struct work_struct *ws)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 9e1866ab238f..a2254f825601 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -212,6 +212,11 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+ /* This was the most recent request during some blk_finish_plug()
+ * or its implicit from-schedule equivalent.
+ * We may use it as a hint to send a P_UNPLUG_REMOTE */
+ __RQ_UNPLUG,
+
/* The peer has sent a retry ACK */
__RQ_POSTPONED,
@@ -249,6 +254,7 @@ enum drbd_req_state_bits {
#define RQ_WSAME (1UL << __RQ_WSAME)
#define RQ_UNMAP (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_UNPLUG (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index eea0c4aec978..0813c654c893 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -346,7 +346,7 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
- enum drbd_role role = R_UNKNOWN;
+ enum drbd_role role = R_SECONDARY;
struct drbd_peer_device *peer_device;
int vnr;
@@ -579,11 +579,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
unsigned long flags;
union drbd_state os, ns;
enum drbd_state_rv rv;
+ void *buffer = NULL;
init_completion(&done);
if (f & CS_SERIALIZE)
mutex_lock(device->state_mutex);
+ if (f & CS_INHIBIT_MD_IO)
+ buffer = drbd_md_get_buffer(device, __func__);
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
@@ -636,6 +639,8 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
}
abort:
+ if (buffer)
+ drbd_md_put_buffer(device);
if (f & CS_SERIALIZE)
mutex_unlock(device->state_mutex);
@@ -664,6 +669,47 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
return rv;
}
+/*
+ * We grab drbd_md_get_buffer() because we don't want to "fail" the disk while
+ * there is IO in-flight: the transition into D_FAILED for detach purposes
+ * may get misinterpreted as an actual IO error in a confused endio function.
+ *
+ * We wrap it all into wait_event(), to retry in case the drbd_req_state()
+ * returns SS_IN_TRANSIENT_STATE.
+ *
+ * To avoid potential deadlock with e.g. the receiver thread trying to grab
+ * drbd_md_get_buffer() while trying to get out of the "transient state", we
+ * need to grab and release the meta data buffer inside that wait_event loop.
+ */
+static enum drbd_state_rv
+request_detach(struct drbd_device *device)
+{
+ return drbd_req_state(device, NS(disk, D_FAILED),
+ CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO);
+}
+
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device)
+{
+ enum drbd_state_rv rv;
+ int ret;
+
+ drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+ wait_event_interruptible(device->state_wait,
+ (rv = request_detach(device)) != SS_IN_TRANSIENT_STATE);
+ drbd_resume_io(device);
+
+ ret = wait_event_interruptible(device->misc_wait,
+ device->state.disk != D_FAILED);
+
+ if (rv == SS_IS_DISKLESS)
+ rv = SS_NOTHING_TO_DO;
+ if (ret)
+ rv = ERR_INTR;
+
+ return rv;
+}
+
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 6c9d5d4a8a75..0276c98fbbdd 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -71,6 +71,10 @@ enum chg_state_flags {
CS_DC_SUSP = 1 << 10,
CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
CS_IGN_OUTD_FAIL = 1 << 11,
+
+ /* Make sure no meta data IO is in flight by calling
+ * drbd_md_get_buffer(). Used for graceful detach. */
+ CS_INHIBIT_MD_IO = 1 << 12,
};
/* drbd_dev_state and drbd_state are different types. This is to stress the
@@ -156,6 +160,10 @@ static inline int drbd_request_state(struct drbd_device *device,
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
+/* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device);
+
enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1d8726a8df34..03471b3fce86 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,6 +65,11 @@ void drbd_md_endio(struct bio *bio)
device = bio->bi_private;
device->md_io.error = blk_status_to_errno(bio->bi_status);
+ /* special case: drbd_md_read() during drbd_adm_attach() */
+ if (device->ldev)
+ put_ldev(device);
+ bio_put(bio);
+
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
* If this io completion runs after that timeout expired, this
@@ -79,9 +84,6 @@ void drbd_md_endio(struct bio *bio)
drbd_md_put_buffer(device);
device->md_io.done = 1;
wake_up(&device->misc_wait);
- bio_put(bio);
- if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
- put_ldev(device);
}
/* reads on behalf of the partner,
@@ -128,6 +130,14 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
block_id = peer_req->block_id;
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
+ if (peer_req->flags & EE_WAS_ERROR) {
+ /* In protocol != C, we usually do not send write acks.
+ * In case of a write error, send the neg ack anyway. */
+ if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
+ inc_unacked(device);
+ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+ }
+
spin_lock_irqsave(&device->resource->req_lock, flags);
device->writ_cnt += peer_req->i.size >> 9;
list_move_tail(&peer_req->w.list, &device->done_ee);
@@ -195,7 +205,8 @@ void drbd_peer_request_endio(struct bio *bio)
}
}
-void drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
+static void
+drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
device->minor, device->resource->name, device->vnr);
@@ -1382,18 +1393,22 @@ static int drbd_send_barrier(struct drbd_connection *connection)
return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
+static int pd_send_unplug_remote(struct drbd_peer_device *pd)
+{
+ struct drbd_socket *sock = &pd->connection->data;
+ if (!drbd_prepare_command(pd, sock))
+ return -EIO;
+ return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+}
+
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_device *device =
container_of(w, struct drbd_device, unplug_work);
- struct drbd_socket *sock;
if (cancel)
return 0;
- sock = &first_peer_device(device)->connection->data;
- if (!drbd_prepare_command(first_peer_device(device), sock))
- return -EIO;
- return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+ return pd_send_unplug_remote(first_peer_device(device));
}
static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
@@ -1455,6 +1470,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1470,6 +1486,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
err = drbd_send_dblock(peer_device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1484,6 +1503,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1501,6 +1521,9 @@ int w_send_read_req(struct drbd_work *w, int cancel)
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1513,7 +1536,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_al_begin_io(device, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(req->private_bio, device->ldev->backing_bdev);
generic_make_request(req->private_bio);
return 0;
@@ -1733,6 +1756,11 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
return;
}
+ if (!connection) {
+ drbd_err(device, "No connection to peer, aborting!\n");
+ return;
+ }
+
if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9c00f29e40c1..60c086a53609 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
- bio.bi_bdev = bdev;
+ bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..85de67334695 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,16 +213,18 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
*/
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
- if (use_dio)
+ if (use_dio) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ } else {
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ }
blk_mq_unfreeze_queue(lo->lo_queue);
}
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
- loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@@ -234,12 +236,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
- lo->lo_logical_blocksize = logical_blocksize;
- blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
- blk_queue_logical_block_size(lo->lo_queue,
- lo->lo_logical_blocksize);
- }
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@@ -467,12 +463,21 @@ static void lo_complete_rq(struct request *rq)
blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}
+static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
+{
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ blk_mq_complete_request(cmd->rq);
+}
+
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
cmd->ret = ret;
- blk_mq_complete_request(cmd->rq);
+ lo_rw_aio_do_completion(cmd);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
@@ -480,22 +485,51 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct bio *bio = cmd->rq->bio;
+ struct request *rq = cmd->rq;
+ struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
+ unsigned int offset;
+ int segments = 0;
int ret;
- /* nomerge for loop request queue */
- WARN_ON(cmd->rq->bio != cmd->rq->biotail);
+ if (rq->bio != rq->biotail) {
+ struct req_iterator iter;
+ struct bio_vec tmp;
+
+ __rq_for_each_bio(bio, rq)
+ segments += bio_segments(bio);
+ bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO);
+ if (!bvec)
+ return -EIO;
+ cmd->bvec = bvec;
+
+ /*
+ * The bios of the request may start from the middle of the
+ * 'bvec' because of bio splitting, so we can't directly copy
+ * bio->bi_io_vec into the new bvec. The rq_for_each_segment
+ * API takes care of the details for us.
+ */
+ rq_for_each_segment(tmp, rq, iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ bvec = cmd->bvec;
+ offset = 0;
+ } else {
+ /*
+ * Same here: this bio may start from the middle of the
+ * 'bvec' because of bio splitting, so the offset into the
+ * bvec must be passed to the iov iterator.
+ */
+ offset = bio->bi_iter.bi_bvec_done;
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ segments = bio_segments(bio);
+ }
+ atomic_set(&cmd->ref, 2);
- bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
- bio_segments(bio), blk_rq_bytes(cmd->rq));
- /*
- * This bio may be started from the middle of the 'bvec'
- * because of bio splitting, so offset from the bvec must
- * be passed to iov iterator
- */
- iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ segments, blk_rq_bytes(rq));
+ iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
@@ -507,6 +541,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
else
ret = call_read_iter(file, &cmd->iocb, &iter);
+ lo_rw_aio_do_completion(cmd);
+
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
return 0;
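The atomic_set(&cmd->ref, 2) together with the lo_rw_aio_do_completion() calls from both the submit path and the AIO callback is a classic two-sided completion: whoever drops the last reference completes the request, which closes the window where the callback fires before the submitter has returned. The same shape as a runnable userspace sketch:

#include <stdatomic.h>
#include <stdio.h>

struct cmd {
	atomic_int ref;
};

static void put_cmd(struct cmd *c)
{
	/* Last one out completes the request, regardless of whether the
	 * submitter or the async callback gets here first. */
	if (atomic_fetch_sub(&c->ref, 1) == 1)
		printf("complete request\n");
}

int main(void)
{
	struct cmd c = { .ref = 2 };

	put_cmd(&c);	/* async completion path */
	put_cmd(&c);	/* submission path drops its reference */
	return 0;
}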
@@ -553,74 +589,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-struct switch_request {
- struct file *file;
- struct completion wait;
-};
-
static inline void loop_update_dio(struct loop_device *lo)
{
__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
lo->use_dio);
}
-/*
- * Do the actual switch; called from the BIO completion routine
- */
-static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
-{
- struct file *file = p->file;
- struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping;
-
- /* if no new file, only flush of queued bios requested */
- if (!file)
- return;
-
- mapping = file->f_mapping;
- mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
- mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- loop_update_dio(lo);
-}
-
-/*
- * loop_switch performs the hard work of switching a backing store.
- * First it needs to flush existing IO, it does this by sending a magic
- * BIO down the pipe. The completion of this BIO does the actual switch.
- */
-static int loop_switch(struct loop_device *lo, struct file *file)
-{
- struct switch_request w;
-
- w.file = file;
-
- /* freeze queue and wait for completion of scheduled requests */
- blk_mq_freeze_queue(lo->lo_queue);
-
- /* do the switch action */
- do_loop_switch(lo, &w);
-
- /* unfreeze */
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- return 0;
-}
-
-/*
- * Helper to flush the IOs in loop, but keeping loop thread running
- */
-static int loop_flush(struct loop_device *lo)
-{
- /* loop not yet configured, no running thread, nothing to flush */
- if (lo->lo_state != Lo_bound)
- return 0;
- return loop_switch(lo, NULL);
-}
-
static void loop_reread_partitions(struct loop_device *lo,
struct block_device *bdev)
{
@@ -685,9 +659,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_putf;
/* and ... switch */
- error = loop_switch(lo, file);
- if (error)
- goto out_putf;
+ blk_mq_freeze_queue(lo->lo_queue);
+ mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_update_dio(lo);
+ blk_mq_unfreeze_queue(lo->lo_queue);
fput(old_file);
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
@@ -820,7 +799,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
- int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@@ -840,11 +818,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
- lo_bits = blksize_bits(lo->lo_logical_blocksize);
- blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -877,7 +853,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file, *f;
struct inode *inode;
struct address_space *mapping;
- unsigned lo_blocksize;
int lo_flags = 0;
int error;
loff_t size;
@@ -921,9 +896,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
- lo_blocksize = S_ISBLK(inode->i_mode) ?
- inode->i_bdev->bd_block_size : PAGE_SIZE;
-
error = -EFBIG;
size = get_loop_size(lo, file);
if ((loff_t)(sector_t)size != size)
@@ -937,8 +909,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = false;
- lo->lo_blocksize = lo_blocksize;
- lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -958,7 +928,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- set_blocksize(bdev, lo_blocksize);
+ set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
+ block_size(inode->i_bdev) : PAGE_SIZE);
lo->lo_state = Lo_bound;
if (part_shift)
@@ -1064,6 +1035,9 @@ static int loop_clr_fd(struct loop_device *lo)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+ blk_queue_logical_block_size(lo->lo_queue, 512);
+ blk_queue_physical_block_size(lo->lo_queue, 512);
+ blk_queue_io_min(lo->lo_queue, 512);
if (bdev) {
bdput(bdev);
invalidate_bdev(bdev);
@@ -1104,7 +1078,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
- int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1110,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
- if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
- if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
- lo->lo_logical_blocksize = 512;
- lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
- if (LO_INFO_BLOCKSIZE(info) != 512 &&
- LO_INFO_BLOCKSIZE(info) != 1024 &&
- LO_INFO_BLOCKSIZE(info) != 2048 &&
- LO_INFO_BLOCKSIZE(info) != 4096)
- return -EINVAL;
- if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
- return -EINVAL;
- }
-
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit ||
- lo->lo_flags != lo_flags ||
- ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
- lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
- LO_INFO_BLOCKSIZE(info))) {
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
@@ -1348,8 +1304,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
- lo->lo_logical_blocksize);
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1366,6 +1321,26 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return error;
}
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ blk_queue_physical_block_size(lo->lo_queue, arg);
+ blk_queue_io_min(lo->lo_queue, arg);
+ loop_update_dio(lo);
+
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+ return 0;
+}
+
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1414,6 +1389,11 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_dio(lo, arg);
break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_block_size(lo, arg);
+ break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
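A userspace sketch of driving the new ioctl follows; it assumes <linux/loop.h> from a kernel carrying this change defines LOOP_SET_BLOCK_SIZE (the fallback value here is an assumption), and that /dev/loop0 is already bound to a backing file:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

#ifndef LOOP_SET_BLOCK_SIZE
#define LOOP_SET_BLOCK_SIZE 0x4C09	/* assumed value */
#endif

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Must be a power of two in [512, PAGE_SIZE] and the device must
	 * be bound, or the kernel returns -EINVAL / -ENXIO. */
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096) < 0)
		perror("LOOP_SET_BLOCK_SIZE");
	close(fd);
	return 0;
}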
@@ -1613,12 +1593,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
err = loop_clr_fd(lo);
if (!err)
return;
- } else {
+ } else if (lo->lo_state == Lo_bound) {
/*
* Otherwise keep thread (if running) and config,
* but flush possible ongoing bios in thread.
*/
- loop_flush(lo);
+ blk_mq_freeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue);
}
mutex_unlock(&lo->lo_ctl_mutex);
@@ -1800,9 +1781,13 @@ static int loop_add(struct loop_device **l, int i)
}
lo->lo_queue->queuedata = lo;
+ blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+
/*
- * It doesn't make sense to enable merge because the I/O
- * submitted to backing file is handled page by page.
+ * By default we do buffered IO, so it doesn't make sense to enable
+ * merging: the I/O submitted to the backing file is handled page by
+ * page. In directio mode, merging does help dispatch bigger requests
+ * to the underlying disk, so we enable merging once directio is enabled.
*/
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
@@ -1996,10 +1981,6 @@ static int __init loop_init(void)
struct loop_device *lo;
int err;
- err = misc_register(&loop_misc);
- if (err < 0)
- return err;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -2017,12 +1998,12 @@ static int __init loop_init(void)
if ((1UL << part_shift) > DISK_MAX_PARTS) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
if (max_loop > 1UL << (MINORBITS - part_shift)) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
/*
@@ -2041,6 +2022,11 @@ static int __init loop_init(void)
range = 1UL << MINORBITS;
}
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ goto err_out;
+
+
if (register_blkdev(LOOP_MAJOR, "loop")) {
err = -EIO;
goto misc_out;
@@ -2060,6 +2046,7 @@ static int __init loop_init(void)
misc_out:
misc_deregister(&loop_misc);
+err_out:
return err;
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..f68c1d50802f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -48,8 +48,6 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
- unsigned lo_blocksize;
- unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;
@@ -69,10 +67,13 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
struct request *rq;
- struct list_head list;
- bool use_aio; /* use AIO interface to handle I/O */
+ union {
+ bool use_aio; /* use AIO interface to handle I/O */
+ atomic_t ref; /* only for aio */
+ };
long ret;
struct kiocb iocb;
+ struct bio_vec *bvec;
};
/* Support for loadable transfer modules */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5bdf923294a5..2aa87cbdede0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -128,7 +128,7 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static int max_part;
+static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
@@ -165,7 +165,7 @@ static ssize_t pid_show(struct device *dev,
return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
-static struct device_attribute pid_attr = {
+static const struct device_attribute pid_attr = {
.attr = { .name = "pid", .mode = S_IRUGO},
.show = pid_show,
};
@@ -1584,6 +1584,15 @@ again:
}
} else {
nbd = idr_find(&nbd_index_idr, index);
+ if (!nbd) {
+ ret = nbd_dev_add(index);
+ if (ret < 0) {
+ mutex_unlock(&nbd_index_mutex);
+ printk(KERN_ERR "nbd: failed to add new device\n");
+ return ret;
+ }
+ nbd = idr_find(&nbd_index_idr, index);
+ }
}
if (!nbd) {
printk(KERN_ERR "nbd: couldn't find device at index %d\n",
@@ -2137,4 +2146,4 @@ MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
-MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
+MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 85c24cace973..8042c26ea9e6 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,3 +1,7 @@
+/*
+ * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
+ * Shaohua Li <shli@fb.com>
+ */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -9,27 +13,110 @@
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+
+#define SECTOR_SHIFT 9
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
+#define FREE_BATCH 16
+
+#define TICKS_PER_SEC 50ULL
+#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+
+static inline u64 mb_per_tick(int mbps)
+{
+ return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
+}
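With TICKS_PER_SEC = 50 the throttle accounts in 20 ms ticks, and mb_per_tick() turns the MB/s cap into a per-tick byte budget; note that the integer division happens before the multiply. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC 50ULL

static uint64_t mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((uint64_t)mbps);
}

int main(void)
{
	/* For a 100 MB/s cap: 1048576 / 50 = 20971 (truncated), times
	 * 100 = 2097100 bytes per 20 ms tick, i.e. ~99.99 MB/s. */
	printf("%llu bytes per tick\n",
	       (unsigned long long)mb_per_tick(100));
	return 0;
}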
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
- struct call_single_data csd;
+ call_single_data_t csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
struct hrtimer timer;
+ blk_status_t error;
};
struct nullb_queue {
unsigned long *tag_map;
wait_queue_head_t wait;
unsigned int queue_depth;
+ struct nullb_device *dev;
struct nullb_cmd *cmds;
};
+/*
+ * Status flags for nullb_device.
+ *
+ * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
+ * UP: Device is currently on and visible in userspace.
+ * THROTTLED: Device is being throttled.
+ * CACHE: Device is using a write-back cache.
+ */
+enum nullb_device_flags {
+ NULLB_DEV_FL_CONFIGURED = 0,
+ NULLB_DEV_FL_UP = 1,
+ NULLB_DEV_FL_THROTTLED = 2,
+ NULLB_DEV_FL_CACHE = 3,
+};
+
+/*
+ * nullb_page is a page in memory for nullb devices.
+ *
+ * @page: The page holding the data.
+ * @bitmap: The bitmap records which sectors in the page hold data.
+ * Each bit covers one block size. For example, sector 8
+ * will use the 7th bit.
+ * The highest 2 bits of the bitmap serve a special purpose. LOCK means the
+ * cache page is being flushed to storage. FREE means the cache page has been
+ * freed and should be skipped when flushing to storage. See
+ * null_make_cache_space().
+ */
+struct nullb_page {
+ struct page *page;
+ unsigned long bitmap;
+};
+#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
+#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool use_lightnvm; /* register as a LightNVM device */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if support discard */
+};
+
struct nullb {
+ struct nullb_device *dev;
struct list_head list;
unsigned int index;
struct request_queue *q;
@@ -37,8 +124,10 @@ struct nullb {
struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
- struct hrtimer timer;
unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
spinlock_t lock;
struct nullb_queue *queues;
@@ -49,7 +138,7 @@ struct nullb {
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
-static int nullb_indexes;
+static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
@@ -65,15 +154,15 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues;
-module_param(submit_queues, int, S_IRUGO);
+static int g_submit_queues = 1;
+module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
-static int home_node = NUMA_NO_NODE;
-module_param(home_node, int, S_IRUGO);
+static int g_home_node = NUMA_NO_NODE;
+module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
-static int queue_mode = NULL_Q_MQ;
+static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
@@ -92,7 +181,7 @@ static int null_param_store_val(const char *str, int *val, int min, int max)
static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
+ return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}
static const struct kernel_param_ops null_queue_mode_param_ops = {
@@ -100,38 +189,38 @@ static const struct kernel_param_ops null_queue_mode_param_ops = {
.get = param_get_int,
};
-device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
-static int gb = 250;
-module_param(gb, int, S_IRUGO);
+static int g_gb = 250;
+module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");
-static int bs = 512;
-module_param(bs, int, S_IRUGO);
+static int g_bs = 512;
+module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool use_lightnvm;
-module_param(use_lightnvm, bool, S_IRUGO);
+static bool g_use_lightnvm;
+module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-static bool blocking;
-module_param(blocking, bool, S_IRUGO);
+static bool g_blocking;
+module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
-static int irqmode = NULL_IRQ_SOFTIRQ;
+static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
+ return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
NULL_IRQ_TIMER);
}
@@ -140,21 +229,358 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
.get = param_get_int,
};
-device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
+device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
-static unsigned long completion_nsec = 10000;
-module_param(completion_nsec, ulong, S_IRUGO);
+static unsigned long g_completion_nsec = 10000;
+module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
-static int hw_queue_depth = 64;
-module_param(hw_queue_depth, int, S_IRUGO);
+static int g_hw_queue_depth = 64;
+module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = false;
-module_param(use_per_node_hctx, bool, S_IRUGO);
+static bool g_use_per_node_hctx;
+module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static struct nullb_device *null_alloc_dev(void);
+static void null_free_dev(struct nullb_device *dev);
+static void null_del_dev(struct nullb *nullb);
+static int null_add_dev(struct nullb_device *dev);
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
+
+static inline struct nullb_device *to_nullb_device(struct config_item *item)
+{
+ return item ? container_of(item, struct nullb_device, item) : NULL;
+}
+
+static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t nullb_device_uint_attr_store(unsigned int *val,
+ const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
+ const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
+ size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
+#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+static ssize_t \
+nullb_device_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return nullb_device_##TYPE##_attr_show( \
+ to_nullb_device(item)->NAME, page); \
+} \
+static ssize_t \
+nullb_device_##NAME##_store(struct config_item *item, const char *page, \
+ size_t count) \
+{ \
+ if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
+ return -EBUSY; \
+ return nullb_device_##TYPE##_attr_store( \
+ &to_nullb_device(item)->NAME, page, count); \
+} \
+CONFIGFS_ATTR(nullb_device_, NAME);
+
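For reference, one instance from the list below, NULLB_DEVICE_ATTR(blocksize, uint), expands (modulo whitespace) to:

static ssize_t
nullb_device_blocksize_show(struct config_item *item, char *page)
{
	return nullb_device_uint_attr_show(
			to_nullb_device(item)->blocksize, page);
}
static ssize_t
nullb_device_blocksize_store(struct config_item *item, const char *page,
			     size_t count)
{
	/* Attributes become read-only once the device is configured. */
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))
		return -EBUSY;
	return nullb_device_uint_attr_store(
			&to_nullb_device(item)->blocksize, page, count);
}
CONFIGFS_ATTR(nullb_device_, blocksize);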
+NULLB_DEVICE_ATTR(size, ulong);
+NULLB_DEVICE_ATTR(completion_nsec, ulong);
+NULLB_DEVICE_ATTR(submit_queues, uint);
+NULLB_DEVICE_ATTR(home_node, uint);
+NULLB_DEVICE_ATTR(queue_mode, uint);
+NULLB_DEVICE_ATTR(blocksize, uint);
+NULLB_DEVICE_ATTR(irqmode, uint);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint);
+NULLB_DEVICE_ATTR(index, uint);
+NULLB_DEVICE_ATTR(use_lightnvm, bool);
+NULLB_DEVICE_ATTR(blocking, bool);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
+NULLB_DEVICE_ATTR(memory_backed, bool);
+NULLB_DEVICE_ATTR(discard, bool);
+NULLB_DEVICE_ATTR(mbps, uint);
+NULLB_DEVICE_ATTR(cache_size, ulong);
+
+static ssize_t nullb_device_power_show(struct config_item *item, char *page)
+{
+ return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
+}
+
+static ssize_t nullb_device_power_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+ bool newp = false;
+ ssize_t ret;
+
+ ret = nullb_device_bool_attr_store(&newp, page, count);
+ if (ret < 0)
+ return ret;
+
+ if (!dev->power && newp) {
+ if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+ return count;
+ if (null_add_dev(dev)) {
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ return -ENOMEM;
+ }
+
+ set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ dev->power = newp;
+ } else if (dev->power && !newp) {
+ mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nullb_device_, power);
+
+static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+
+ return badblocks_show(&t_dev->badblocks, page, 0);
+}
+
+static ssize_t nullb_device_badblocks_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+ char *orig, *buf, *tmp;
+ u64 start, end;
+ int ret;
+
+ orig = kstrndup(page, count, GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+
+ buf = strstrip(orig);
+
+ ret = -EINVAL;
+ if (buf[0] != '+' && buf[0] != '-')
+ goto out;
+ tmp = strchr(&buf[1], '-');
+ if (!tmp)
+ goto out;
+ *tmp = '\0';
+ ret = kstrtoull(buf + 1, 0, &start);
+ if (ret)
+ goto out;
+ ret = kstrtoull(tmp + 1, 0, &end);
+ if (ret)
+ goto out;
+ ret = -EINVAL;
+ if (start > end)
+ goto out;
+ /* enable badblocks */
+ cmpxchg(&t_dev->badblocks.shift, -1, 0);
+ if (buf[0] == '+')
+ ret = badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1);
+ else
+ ret = badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1);
+ if (ret == 0)
+ ret = count;
+out:
+ kfree(orig);
+ return ret;
+}
+CONFIGFS_ATTR(nullb_device_, badblocks);
+
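The parser above accepts exactly one '+start-end' or '-start-end' range per write. A userspace usage sketch in C (assuming configfs is mounted at /sys/kernel/config and a device directory nullb0 was created with mkdir under the nullb subsystem):

#include <stdio.h>

int main(void)
{
	/* Equivalent of: echo "+0-1023" > .../nullb0/badblocks */
	FILE *f = fopen("/sys/kernel/config/nullb/nullb0/badblocks", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* '+' marks sectors 0..1023 bad; '-' would clear the range. */
	fputs("+0-1023", f);
	return fclose(f) ? 1 : 0;
}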
+static struct configfs_attribute *nullb_device_attrs[] = {
+ &nullb_device_attr_size,
+ &nullb_device_attr_completion_nsec,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_home_node,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_hw_queue_depth,
+ &nullb_device_attr_index,
+ &nullb_device_attr_use_lightnvm,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_power,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_mbps,
+ &nullb_device_attr_cache_size,
+ &nullb_device_attr_badblocks,
+ NULL,
+};
+
+static void nullb_device_release(struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ badblocks_exit(&dev->badblocks);
+ null_free_device_storage(dev, false);
+ null_free_dev(dev);
+}
+
+static struct configfs_item_operations nullb_device_ops = {
+ .release = nullb_device_release,
+};
+
+static struct config_item_type nullb_device_type = {
+ .ct_item_ops = &nullb_device_ops,
+ .ct_attrs = nullb_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct
+config_item *nullb_group_make_item(struct config_group *group, const char *name)
+{
+ struct nullb_device *dev;
+
+ dev = null_alloc_dev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&dev->item, name, &nullb_device_type);
+
+ return &dev->item;
+}
+
+static void
+nullb_group_drop_item(struct config_group *group, struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+ mutex_lock(&lock);
+ dev->power = false;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ }
+
+ config_item_put(item);
+}
+
+static ssize_t memb_group_features_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+}
+
+CONFIGFS_ATTR_RO(memb_group_, features);
+
+static struct configfs_attribute *nullb_group_attrs[] = {
+ &memb_group_attr_features,
+ NULL,
+};
+
+static struct configfs_group_operations nullb_group_ops = {
+ .make_item = nullb_group_make_item,
+ .drop_item = nullb_group_drop_item,
+};
+
+static struct config_item_type nullb_group_type = {
+ .ct_group_ops = &nullb_group_ops,
+ .ct_attrs = nullb_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nullb_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nullb",
+ .ci_type = &nullb_group_type,
+ },
+ },
+};
+
+static inline int null_cache_active(struct nullb *nullb)
+{
+ return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+}
+
+static struct nullb_device *null_alloc_dev(void)
+{
+ struct nullb_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
+ INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
+ if (badblocks_init(&dev->badblocks, 0)) {
+ kfree(dev);
+ return NULL;
+ }
+
+ dev->size = g_gb * 1024;
+ dev->completion_nsec = g_completion_nsec;
+ dev->submit_queues = g_submit_queues;
+ dev->home_node = g_home_node;
+ dev->queue_mode = g_queue_mode;
+ dev->blocksize = g_bs;
+ dev->irqmode = g_irqmode;
+ dev->hw_queue_depth = g_hw_queue_depth;
+ dev->use_lightnvm = g_use_lightnvm;
+ dev->blocking = g_blocking;
+ dev->use_per_node_hctx = g_use_per_node_hctx;
+ return dev;
+}
+
+static void null_free_dev(struct nullb_device *dev)
+{
+ kfree(dev);
+}
+
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
clear_bit_unlock(tag, nq->tag_map);
@@ -193,7 +619,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
@@ -229,19 +655,21 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
+ int queue_mode = cmd->nq->dev->queue_mode;
if (cmd->rq)
q = cmd->rq->q;
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, BLK_STS_OK);
+ blk_mq_end_request(cmd->rq, cmd->error);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, BLK_STS_OK);
+ blk_end_request_all(cmd->rq, cmd->error);
break;
case NULL_Q_BIO:
+ cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
break;
}
@@ -267,25 +695,582 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- ktime_t kt = completion_nsec;
+ ktime_t kt = cmd->nq->dev->completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
{
- if (queue_mode == NULL_Q_MQ)
+ struct nullb *nullb = rq->q->queuedata;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
end_cmd(blk_mq_rq_to_pdu(rq));
else
end_cmd(rq->special);
}
-static inline void null_handle_cmd(struct nullb_cmd *cmd)
+static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+{
+ struct nullb_page *t_page;
+
+ t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ if (!t_page)
+ goto out;
+
+ t_page->page = alloc_pages(gfp_flags, 0);
+ if (!t_page->page)
+ goto out_freepage;
+
+ t_page->bitmap = 0;
+ return t_page;
+out_freepage:
+ kfree(t_page);
+out:
+ return NULL;
+}
+
+static void null_free_page(struct nullb_page *t_page)
+{
+ __set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
+ if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+ return;
+ __free_page(t_page->page);
+ kfree(t_page);
+}
+
+static void null_free_sector(struct nullb *nullb, sector_t sector,
+ bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ t_page = radix_tree_lookup(root, idx);
+ if (t_page) {
+ __clear_bit(sector_bit, &t_page->bitmap);
+
+ if (!t_page->bitmap) {
+ ret = radix_tree_delete_item(root, idx, t_page);
+ WARN_ON(ret != t_page);
+ null_free_page(ret);
+ if (is_cache)
+ nullb->dev->curr_cache -= PAGE_SIZE;
+ }
+ }
+}
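The idx/sector_bit arithmetic above is the heart of the backing store: the high bits of a sector number select a radix-tree slot and the low bits select a bit in that page's sector bitmap. A standalone sketch of the mapping, assuming 4K pages and 512-byte sectors so that PAGE_SECTORS_SHIFT is 3 (illustrative only):

#include <stdio.h>

#define PAGE_SECTORS_SHIFT	3	/* assumed: 4096-byte page, 512-byte sector */
#define SECTOR_MASK		((1 << PAGE_SECTORS_SHIFT) - 1)

int main(void)
{
	unsigned long long sector = 13;
	unsigned long long idx = sector >> PAGE_SECTORS_SHIFT;	/* slot 1 */
	unsigned int bit = sector & SECTOR_MASK;		/* bit 5 */

	printf("sector %llu -> page index %llu, bitmap bit %u\n",
	       sector, idx, bit);
	return 0;
}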
+
+static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
+ struct nullb_page *t_page, bool is_cache)
+{
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+
+ if (radix_tree_insert(root, idx, t_page)) {
+ null_free_page(t_page);
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(!t_page || t_page->page->index != idx);
+ } else if (is_cache)
+ nullb->dev->curr_cache += PAGE_SIZE;
+
+ return t_page;
+}
+
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
+{
+ unsigned long pos = 0;
+ int nr_pages;
+ struct nullb_page *ret, *t_pages[FREE_BATCH];
+ struct radix_tree_root *root;
+
+ root = is_cache ? &dev->cache : &dev->data;
+
+ do {
+ int i;
+
+ nr_pages = radix_tree_gang_lookup(root,
+ (void **)t_pages, pos, FREE_BATCH);
+
+ for (i = 0; i < nr_pages; i++) {
+ pos = t_pages[i]->page->index;
+ ret = radix_tree_delete_item(root, pos, t_pages[i]);
+ WARN_ON(ret != t_pages[i]);
+ null_free_page(ret);
+ }
+
+ pos++;
+ } while (nr_pages == FREE_BATCH);
+
+ if (is_cache)
+ dev->curr_cache = 0;
+}
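null_free_device_storage() drains the tree in FREE_BATCH-sized gangs, resuming each pass just past the last index it freed and stopping once a lookup comes back short. The same loop shape in miniature, with a toy lookup standing in for radix_tree_gang_lookup() (illustrative only):

#include <stdio.h>

#define FREE_BATCH 4

/* toy stand-in for radix_tree_gang_lookup(): items live at indexes 0..9 */
static int toy_gang_lookup(unsigned long pos, unsigned long *out, int max)
{
	int n = 0;

	while (pos < 10 && n < max)
		out[n++] = pos++;
	return n;
}

int main(void)
{
	unsigned long pos = 0, batch[FREE_BATCH];
	int nr;

	do {
		int i;

		nr = toy_gang_lookup(pos, batch, FREE_BATCH);
		for (i = 0; i < nr; i++) {
			pos = batch[i];
			printf("free item %lu\n", pos);
		}
		pos++;	/* resume just past the last freed index */
	} while (nr == FREE_BATCH);
	return 0;
}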
+
+static struct nullb_page *__null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page;
+ struct radix_tree_root *root;
+
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(t_page && t_page->page->index != idx);
+
+ if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+ return t_page;
+
+ return NULL;
+}
+
+static struct nullb_page *null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool ignore_cache)
+{
+ struct nullb_page *page = NULL;
+
+ if (!ignore_cache)
+ page = __null_lookup_page(nullb, sector, for_write, true);
+ if (page)
+ return page;
+ return __null_lookup_page(nullb, sector, for_write, false);
+}
+
+static struct nullb_page *null_insert_page(struct nullb *nullb,
+ sector_t sector, bool ignore_cache)
+{
+ u64 idx;
+ struct nullb_page *t_page;
+
+ t_page = null_lookup_page(nullb, sector, true, ignore_cache);
+ if (t_page)
+ return t_page;
+
+ spin_unlock_irq(&nullb->lock);
+
+ t_page = null_alloc_page(GFP_NOIO);
+ if (!t_page)
+ goto out_lock;
+
+ if (radix_tree_preload(GFP_NOIO))
+ goto out_freepage;
+
+ spin_lock_irq(&nullb->lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ t_page->page->index = idx;
+ t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
+ radix_tree_preload_end();
+
+ return t_page;
+out_freepage:
+ null_free_page(t_page);
+out_lock:
+ spin_lock_irq(&nullb->lock);
+ return null_lookup_page(nullb, sector, true, ignore_cache);
+}
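null_insert_page() cannot allocate while holding nullb->lock, since the GFP_NOIO allocation may sleep, so it drops the lock, allocates, retakes the lock, and then copes with a racing insert of the same index (hence the re-lookup on the failure path and the duplicate handling in null_radix_tree_insert()). A minimal sketch of that drop-allocate-retake shape, using a pthread mutex in place of the spinlock (illustrative, not the patch's code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* stand-in for one radix-tree slot */

static void *get_or_insert(void)
{
	void *new;

	pthread_mutex_lock(&lock);
	if (slot) {
		pthread_mutex_unlock(&lock);
		return slot;
	}
	pthread_mutex_unlock(&lock);	/* must not allocate under the lock */

	new = malloc(64);
	pthread_mutex_lock(&lock);
	if (slot)
		free(new);	/* a racing thread beat us to the insert */
	else
		slot = new;
	pthread_mutex_unlock(&lock);
	return slot;
}

int main(void)
{
	return get_or_insert() ? 0 : 1;
}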
+
+static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
+{
+ int i;
+ unsigned int offset;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ void *dst, *src;
+
+ idx = c_page->page->index;
+
+ t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
+
+ __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
+ if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+ null_free_page(c_page);
+ if (t_page && t_page->bitmap == 0) {
+ ret = radix_tree_delete_item(&nullb->dev->data,
+ idx, t_page);
+ null_free_page(t_page);
+ }
+ return 0;
+ }
+
+ if (!t_page)
+ return -ENOMEM;
+
+ src = kmap_atomic(c_page->page);
+ dst = kmap_atomic(t_page->page);
+
+ for (i = 0; i < PAGE_SECTORS;
+ i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
+ if (test_bit(i, &c_page->bitmap)) {
+ offset = (i << SECTOR_SHIFT);
+ memcpy(dst + offset, src + offset,
+ nullb->dev->blocksize);
+ __set_bit(i, &t_page->bitmap);
+ }
+ }
+
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
+ null_free_page(ret);
+ nullb->dev->curr_cache -= PAGE_SIZE;
+
+ return 0;
+}
+
+static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
+ int i, err, nr_pages;
+ struct nullb_page *c_pages[FREE_BATCH];
+ unsigned long flushed = 0, one_round;
+
+again:
+ if ((nullb->dev->cache_size * 1024 * 1024) >
+ nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
+ return 0;
+
+ nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
+ (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
+ /*
+	 * null_flush_cache_page() could drop the lock before using the c_pages.
+	 * To avoid that race, we don't allow the pages to be freed meanwhile.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ nullb->cache_flush_pos = c_pages[i]->page->index;
+ /*
+		 * This page is already being flushed to disk by another
+		 * thread, so skip it
+ */
+ if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+ c_pages[i] = NULL;
+ else
+ __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+ }
+
+ one_round = 0;
+ for (i = 0; i < nr_pages; i++) {
+ if (c_pages[i] == NULL)
+ continue;
+ err = null_flush_cache_page(nullb, c_pages[i]);
+ if (err)
+ return err;
+ one_round++;
+ }
+ flushed += one_round << PAGE_SHIFT;
+
+ if (n > flushed) {
+ if (nr_pages == 0)
+ nullb->cache_flush_pos = 0;
+ if (one_round == 0) {
+ /* give other threads a chance */
+ spin_unlock_irq(&nullb->lock);
+ spin_lock_irq(&nullb->lock);
+ }
+ goto again;
+ }
+ return 0;
+}
+
+static int copy_to_nullb(struct nullb *nullb, struct page *source,
+ unsigned int off, sector_t sector, size_t n, bool is_fua)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ if (null_cache_active(nullb) && !is_fua)
+ null_make_cache_space(nullb, PAGE_SIZE);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_insert_page(nullb, sector,
+ !null_cache_active(nullb) || is_fua);
+ if (!t_page)
+ return -ENOSPC;
+
+ src = kmap_atomic(source);
+ dst = kmap_atomic(t_page->page);
+ memcpy(dst + offset, src + off + count, temp);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ __set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+
+ if (is_fua)
+ null_free_sector(nullb, sector, true);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static int copy_from_nullb(struct nullb *nullb, struct page *dest,
+ unsigned int off, sector_t sector, size_t n)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_lookup_page(nullb, sector, false,
+ !null_cache_active(nullb));
+
+ dst = kmap_atomic(dest);
+ if (!t_page) {
+ memset(dst + off + count, 0, temp);
+ goto next;
+ }
+ src = kmap_atomic(t_page->page);
+ memcpy(dst + off + count, src + offset, temp);
+ kunmap_atomic(src);
+next:
+ kunmap_atomic(dst);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
+{
+ size_t temp;
+
+ spin_lock_irq(&nullb->lock);
+ while (n > 0) {
+ temp = min_t(size_t, n, nullb->dev->blocksize);
+ null_free_sector(nullb, sector, false);
+ if (null_cache_active(nullb))
+ null_free_sector(nullb, sector, true);
+ sector += temp >> SECTOR_SHIFT;
+ n -= temp;
+ }
+ spin_unlock_irq(&nullb->lock);
+}
+
+static int null_handle_flush(struct nullb *nullb)
+{
+ int err;
+
+ if (!null_cache_active(nullb))
+ return 0;
+
+ spin_lock_irq(&nullb->lock);
+ while (true) {
+ err = null_make_cache_space(nullb,
+ nullb->dev->cache_size * 1024 * 1024);
+ if (err || nullb->dev->curr_cache == 0)
+ break;
+ }
+
+ WARN_ON(!radix_tree_empty(&nullb->dev->cache));
+ spin_unlock_irq(&nullb->lock);
+ return err;
+}
+
+static int null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, sector_t sector,
+ bool is_fua)
+{
+ int err = 0;
+
+ if (!is_write) {
+ err = copy_from_nullb(nullb, page, off, sector, len);
+ flush_dcache_page(page);
+ } else {
+ flush_dcache_page(page);
+ err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ }
+
+ return err;
+}
+
+static int null_handle_rq(struct nullb_cmd *cmd)
+{
+ struct request *rq = cmd->rq;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct req_iterator iter;
+ struct bio_vec bvec;
+
+ sector = blk_rq_pos(rq);
+
+ if (req_op(rq) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector, blk_rq_bytes(rq));
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ rq_for_each_segment(bvec, rq, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(req_op(rq)), sector,
+ req_op(rq) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+
+ return 0;
+}
+
+static int null_handle_bio(struct nullb_cmd *cmd)
+{
+ struct bio *bio = cmd->bio;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ sector = bio->bi_iter.bi_sector;
+
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector,
+ bio_sectors(bio) << SECTOR_SHIFT);
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ bio_for_each_segment(bvec, bio, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector,
+ bio_op(bio) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+ return 0;
+}
+
+static void null_stop_queue(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_stop_hw_queues(q);
+ else {
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+}
+
+static void null_restart_queue_async(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+ unsigned long flags;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_start_stopped_hw_queues(q, true);
+ else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct nullb *nullb = dev->nullb;
+ int err = 0;
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
+ struct request *rq = cmd->rq;
+
+ if (!hrtimer_active(&nullb->bw_timer))
+ hrtimer_restart(&nullb->bw_timer);
+
+ if (atomic_long_sub_return(blk_rq_bytes(rq),
+ &nullb->cur_bytes) < 0) {
+ null_stop_queue(nullb);
+			/* the bw timer may have refilled the budget concurrently */
+ if (atomic_long_read(&nullb->cur_bytes) > 0)
+ null_restart_queue_async(nullb);
+ if (dev->queue_mode == NULL_Q_RQ) {
+ struct request_queue *q = nullb->q;
+
+ spin_lock_irq(q->queue_lock);
+ rq->rq_flags |= RQF_DONTPREP;
+ blk_requeue_request(q, rq);
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_OK;
+ } else
+ /* requeue request */
+ return BLK_STS_RESOURCE;
+ }
+ }
+
+ if (nullb->dev->badblocks.shift != -1) {
+ int bad_sectors;
+ sector_t sector, size, first_bad;
+ bool is_flush = true;
+
+ if (dev->queue_mode == NULL_Q_BIO &&
+ bio_op(cmd->bio) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = cmd->bio->bi_iter.bi_sector;
+ size = bio_sectors(cmd->bio);
+ }
+ if (dev->queue_mode != NULL_Q_BIO &&
+ req_op(cmd->rq) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = blk_rq_pos(cmd->rq);
+ size = blk_rq_sectors(cmd->rq);
+ }
+ if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
+ size, &first_bad, &bad_sectors)) {
+ cmd->error = BLK_STS_IOERR;
+ goto out;
+ }
+ }
+
+ if (dev->memory_backed) {
+ if (dev->queue_mode == NULL_Q_BIO) {
+ if (bio_op(cmd->bio) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_bio(cmd);
+ } else {
+ if (req_op(cmd->rq) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_rq(cmd);
+ }
+ }
+ cmd->error = errno_to_blk_status(err);
+out:
/* Complete IO by inline, softirq or timer */
- switch (irqmode) {
+ switch (dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (queue_mode) {
+ switch (dev->queue_mode) {
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
@@ -307,6 +1292,34 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
null_cmd_end_timer(cmd);
break;
}
+ return BLK_STS_OK;
+}
+
+static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
+{
+ struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+ unsigned int mbps = nullb->dev->mbps;
+
+ if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
+ return HRTIMER_NORESTART;
+
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
+ null_restart_queue_async(nullb);
+
+ hrtimer_forward_now(&nullb->bw_timer, timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+static void nullb_setup_bwtimer(struct nullb *nullb)
+{
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+
+ hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ nullb->bw_timer.function = nullb_bwtimer_fn;
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
+ hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
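nullb_bwtimer_fn() and the NULLB_DEV_FL_THROTTLED branch of null_handle_cmd() together form a simple token bucket: each request debits cur_bytes, the queue is stopped once the budget goes negative, and every timer tick refills the budget to mb_per_tick(mbps) and restarts the queue. A userspace-flavoured sketch of that scheme; the refill size below is an assumed stand-in for mb_per_tick() (illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long cur_bytes;
#define BYTES_PER_TICK (40L << 20)	/* assumed refill per timer tick */

static bool submit(long req_bytes)
{
	/* debit first; a negative result means the budget is exhausted */
	if (atomic_fetch_sub(&cur_bytes, req_bytes) - req_bytes < 0)
		return false;		/* caller would stop the queue here */
	return true;
}

static void timer_tick(void)
{
	atomic_store(&cur_bytes, BYTES_PER_TICK);	/* refill and restart */
}

int main(void)
{
	timer_tick();
	printf("first 4K write %s\n", submit(4096) ? "ok" : "throttled");
	return 0;
}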
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
@@ -366,20 +1379,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct nullb_queue *nq = hctx->driver_data;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
- cmd->nq = hctx->driver_data;
+ cmd->nq = nq;
blk_mq_start_request(bd->rq);
- null_handle_cmd(cmd);
- return BLK_STS_OK;
+ return null_handle_cmd(cmd);
}
static const struct blk_mq_ops null_mq_ops = {
@@ -438,7 +1451,8 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
- sector_t size = gb * 1024 * 1024 * 1024ULL;
+ struct nullb *nullb = dev->q->queuedata;
+ sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
sector_t blksize;
struct nvm_id_group *grp;
@@ -460,7 +1474,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;
- sector_div(size, bs); /* convert size to pages */
+ sector_div(size, nullb->dev->blocksize); /* convert size to pages */
size >>= 8; /* convert size to pgs per blk */
grp = &id->grp;
grp->mtype = 0;
@@ -474,8 +1488,8 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->num_blk = blksize;
grp->num_pln = 1;
- grp->fpg_sz = bs;
- grp->csecs = bs;
+ grp->fpg_sz = nullb->dev->blocksize;
+ grp->csecs = nullb->dev->blocksize;
grp->trdt = 25000;
grp->trdm = 25000;
grp->tprt = 500000;
@@ -483,7 +1497,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->tbet = 1500000;
grp->tbem = 1500000;
grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = hw_queue_depth;
+ grp->cpar = nullb->dev->hw_queue_depth;
return 0;
}
@@ -568,19 +1582,44 @@ static void null_nvm_unregister(struct nullb *nullb) {}
static void null_del_dev(struct nullb *nullb)
{
+ struct nullb_device *dev = nullb->dev;
+
+ ida_simple_remove(&nullb_indexes, nullb->index);
+
list_del_init(&nullb->list);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
null_nvm_unregister(nullb);
else
del_gendisk(nullb->disk);
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
+ hrtimer_cancel(&nullb->bw_timer);
+ atomic_long_set(&nullb->cur_bytes, LONG_MAX);
+ null_restart_queue_async(nullb);
+ }
+
blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ &&
+ nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!use_lightnvm)
+ if (!dev->use_lightnvm)
put_disk(nullb->disk);
cleanup_queues(nullb);
+ if (null_cache_active(nullb))
+ null_free_device_storage(nullb->dev, true);
kfree(nullb);
+ dev->nullb = NULL;
+}
+
+static void null_config_discard(struct nullb *nullb)
+{
+	if (!nullb->dev->discard)
+ return;
+ nullb->q->limits.discard_granularity = nullb->dev->blocksize;
+ nullb->q->limits.discard_alignment = nullb->dev->blocksize;
+ blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
}
static int null_open(struct block_device *bdev, fmode_t mode)
@@ -605,6 +1644,7 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
+ nq->dev = nullb->dev;
}
static void null_init_queues(struct nullb *nullb)
@@ -652,13 +1692,13 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
- GFP_KERNEL);
+ nullb->queues = kzalloc(nullb->dev->submit_queues *
+ sizeof(struct nullb_queue), GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->nr_queues = 0;
- nullb->queue_depth = hw_queue_depth;
+ nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
@@ -668,7 +1708,7 @@ static int init_driver_queues(struct nullb *nullb)
struct nullb_queue *nq;
int i, ret = 0;
- for (i = 0; i < submit_queues; i++) {
+ for (i = 0; i < nullb->dev->submit_queues; i++) {
nq = &nullb->queues[i];
null_init_queue(nullb, nq);
@@ -686,10 +1726,10 @@ static int null_gendisk_register(struct nullb *nullb)
struct gendisk *disk;
sector_t size;
- disk = nullb->disk = alloc_disk_node(1, home_node);
+ disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
if (!disk)
return -ENOMEM;
- size = gb * 1024 * 1024 * 1024ULL;
+ size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
set_capacity(disk, size >> 9);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
@@ -704,49 +1744,86 @@ static int null_gendisk_register(struct nullb *nullb)
return 0;
}
-static int null_init_tag_set(struct blk_mq_tag_set *set)
+static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
set->ops = &null_mq_ops;
- set->nr_hw_queues = submit_queues;
- set->queue_depth = hw_queue_depth;
- set->numa_node = home_node;
+ set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
+ g_submit_queues;
+ set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
+ g_hw_queue_depth;
+ set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->driver_data = NULL;
- if (blocking)
+ if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
return blk_mq_alloc_tag_set(set);
}
-static int null_add_dev(void)
+static void null_validate_conf(struct nullb_device *dev)
+{
+ dev->blocksize = round_down(dev->blocksize, 512);
+ dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+ if (dev->use_lightnvm && dev->blocksize != 4096)
+ dev->blocksize = 4096;
+
+ if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
+ dev->queue_mode = NULL_Q_MQ;
+
+ if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->submit_queues != nr_online_nodes)
+ dev->submit_queues = nr_online_nodes;
+ } else if (dev->submit_queues > nr_cpu_ids)
+ dev->submit_queues = nr_cpu_ids;
+ else if (dev->submit_queues == 0)
+ dev->submit_queues = 1;
+
+ dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
+ dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
+
+	/* memory backing allocates in the I/O path, so force blocking */
+ if (dev->memory_backed)
+ dev->blocking = true;
+	else /* a cache is meaningless without memory backing */
+ dev->cache_size = 0;
+ dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
+ dev->cache_size);
+ dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
+	/* a bio-based queue cannot be stopped, so throttling is unsupported */
+ if (dev->queue_mode == NULL_Q_BIO)
+ dev->mbps = 0;
+}
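Note that null_validate_conf() coerces bad settings rather than rejecting them: a requested blocksize of 1000, for instance, rounds down to 512, while 9999 rounds down to 9728 and is then clamped to 4096. A quick check of that arithmetic, with round_down()/clamp_t() re-expressed in plain C (illustrative only):

#include <stdio.h>

static unsigned int fix_bs(unsigned int bs)
{
	bs &= ~511u;		/* round_down(bs, 512) for a power-of-two step */
	if (bs < 512)
		bs = 512;	/* clamp_t(unsigned int, bs, 512, 4096) */
	if (bs > 4096)
		bs = 4096;
	return bs;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, fix_bs(1000));	/* 512 */
	printf("%u -> %u\n", 9999u, fix_bs(9999));	/* 4096 */
	return 0;
}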
+
+static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
int rv;
- nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+ null_validate_conf(dev);
+
+ nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
if (!nullb) {
rv = -ENOMEM;
goto out;
}
+ nullb->dev = dev;
+ dev->nullb = nullb;
spin_lock_init(&nullb->lock);
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
- submit_queues = nr_online_nodes;
-
rv = setup_queues(nullb);
if (rv)
goto out_free_nullb;
- if (queue_mode == NULL_Q_MQ) {
+ if (dev->queue_mode == NULL_Q_MQ) {
if (shared_tags) {
nullb->tag_set = &tag_set;
rv = 0;
} else {
nullb->tag_set = &nullb->__tag_set;
- rv = null_init_tag_set(nullb->tag_set);
+ rv = null_init_tag_set(nullb, nullb->tag_set);
}
if (rv)
@@ -758,8 +1835,8 @@ static int null_add_dev(void)
goto out_cleanup_tags;
}
null_init_queues(nullb);
- } else if (queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ } else if (dev->queue_mode == NULL_Q_BIO) {
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -769,7 +1846,8 @@ static int null_add_dev(void)
if (rv)
goto out_cleanup_blk_queue;
} else {
- nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
+ dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -781,20 +1859,34 @@ static int null_add_dev(void)
goto out_cleanup_blk_queue;
}
+ if (dev->mbps) {
+ set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
+ nullb_setup_bwtimer(nullb);
+ }
+
+ if (dev->cache_size > 0) {
+ set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+ blk_queue_write_cache(nullb->q, true, true);
+ blk_queue_flush_queueable(nullb->q, true);
+ }
+
nullb->q->queuedata = nullb;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = nullb_indexes++;
+ nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ dev->index = nullb->index;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, bs);
- blk_queue_physical_block_size(nullb->q, bs);
+ blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ blk_queue_physical_block_size(nullb->q, dev->blocksize);
+
+ null_config_discard(nullb);
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
rv = null_nvm_register(nullb);
else
rv = null_gendisk_register(nullb);
@@ -810,7 +1902,7 @@ static int null_add_dev(void)
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
cleanup_queues(nullb);
@@ -825,51 +1917,63 @@ static int __init null_init(void)
int ret = 0;
unsigned int i;
struct nullb *nullb;
+ struct nullb_device *dev;
+
+	/* nullb_page.bitmap must hold one bit per sector plus two flag bits */
+ if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
+ return -EINVAL;
- if (bs > PAGE_SIZE) {
+ if (g_bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
- bs = PAGE_SIZE;
+ g_bs = PAGE_SIZE;
}
- if (use_lightnvm && bs != 4096) {
+ if (g_use_lightnvm && g_bs != 4096) {
pr_warn("null_blk: LightNVM only supports 4k block size\n");
pr_warn("null_blk: defaults block size to 4k\n");
- bs = 4096;
+ g_bs = 4096;
}
- if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+ if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
- queue_mode = NULL_Q_MQ;
+ g_queue_mode = NULL_Q_MQ;
}
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
- if (submit_queues < nr_online_nodes) {
- pr_warn("null_blk: submit_queues param is set to %u.",
+ if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
+ if (g_submit_queues != nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.\n",
nr_online_nodes);
- submit_queues = nr_online_nodes;
+ g_submit_queues = nr_online_nodes;
}
- } else if (submit_queues > nr_cpu_ids)
- submit_queues = nr_cpu_ids;
- else if (!submit_queues)
- submit_queues = 1;
+ } else if (g_submit_queues > nr_cpu_ids)
+ g_submit_queues = nr_cpu_ids;
+ else if (g_submit_queues <= 0)
+ g_submit_queues = 1;
- if (queue_mode == NULL_Q_MQ && shared_tags) {
- ret = null_init_tag_set(&tag_set);
+ if (g_queue_mode == NULL_Q_MQ && shared_tags) {
+ ret = null_init_tag_set(NULL, &tag_set);
if (ret)
return ret;
}
+ config_group_init(&nullb_subsys.su_group);
+ mutex_init(&nullb_subsys.su_mutex);
+
+ ret = configfs_register_subsystem(&nullb_subsys);
+ if (ret)
+ goto err_tagset;
+
mutex_init(&lock);
null_major = register_blkdev(0, "nullb");
if (null_major < 0) {
ret = null_major;
- goto err_tagset;
+ goto err_conf;
}
- if (use_lightnvm) {
+ if (g_use_lightnvm) {
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
0, 0, NULL);
if (!ppa_cache) {
@@ -880,9 +1984,14 @@ static int __init null_init(void)
}
for (i = 0; i < nr_devices; i++) {
- ret = null_add_dev();
- if (ret)
+ dev = null_alloc_dev();
+ if (!dev)
+ goto err_dev;
+ ret = null_add_dev(dev);
+ if (ret) {
+ null_free_dev(dev);
goto err_dev;
+ }
}
pr_info("null: module loaded\n");
@@ -891,13 +2000,17 @@ static int __init null_init(void)
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
kmem_cache_destroy(ppa_cache);
err_ppa:
unregister_blkdev(null_major, "nullb");
+err_conf:
+ configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
return ret;
}
@@ -906,16 +2019,22 @@ static void __exit null_exit(void)
{
struct nullb *nullb;
+ configfs_unregister_subsystem(&nullb_subsys);
+
unregister_blkdev(null_major, "nullb");
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
+ struct nullb_device *dev;
+
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
mutex_unlock(&lock);
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
kmem_cache_destroy(ppa_cache);
@@ -924,5 +2043,5 @@ static void __exit null_exit(void)
module_init(null_init);
module_exit(null_exit);
-MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6b8b097abbb9..67974796c350 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_bdev = pd->bdev;
+ bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->sector = new_sector;
bio_reset(pkt->bio);
- pkt->bio->bi_bdev = pd->bdev;
+	bio_set_dev(pkt->bio, pd->bdev);
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
psd->pd = pd;
psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
+ bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index e0e81cacd781..6a55959cbf78 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -409,10 +409,8 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
CACHE_PAGE_COUNT, GFP_KERNEL);
- if (priv->cache.tags == NULL) {
- dev_err(&dev->core, "Could not allocate cache tags\n");
+ if (!priv->cache.tags)
return -ENOMEM;
- }
dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
@@ -743,7 +741,11 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
goto out_unmap_reports;
}
- ps3vram_cache_init(dev);
+ error = ps3vram_cache_init(dev);
+	if (error < 0)
+		goto out_unmap_reports;
+
ps3vram_proc_init(dev);
queue = blk_alloc_queue(GFP_KERNEL);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b008b6a98098..b640ad8a6d20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3435,7 +3435,7 @@ static void rbd_acquire_lock(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, lock_dwork);
enum rbd_lock_state lock_state;
- int ret;
+ int ret = 0;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 7f4acebf4657..e397d3ee7308 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0,
- start_time);
+ generic_end_io_acct(card->queue, bio_data_dir(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index d0368682bd43..7cedb4295e9d 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1,19 +1,12 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST,
+ * which had itself been acquired by Western Digital in 2012.
+ *
+ * Copyright 2012 sTec, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
- * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
- * Initial Driver Design!
- * Thomas Swann <tswann@stec-inc.com>
- * Interrupt handling.
- * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
- * biomode implementation.
- * Akhil Bhansali <abhansali@stec-inc.com>
- * Added support for DISCARD / FLUSH and FUA.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#include <linux/kernel.h>
@@ -23,11 +16,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
@@ -37,9 +30,9 @@
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
-#include <linux/ctype.h>
#include <linux/wait.h>
-#include <linux/uio.h>
+#include <linux/stringify.h>
+#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
@@ -51,19 +44,6 @@
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
-enum {
- STEC_LINK_2_5GTS = 0,
- STEC_LINK_5GTS = 1,
- STEC_LINK_8GTS = 2,
- STEC_LINK_UNKNOWN = 0xFF
-};
-
-enum {
- SKD_FLUSH_INITIALIZER,
- SKD_FLUSH_ZERO_SIZE_FIRST,
- SKD_FLUSH_DATA_SECOND,
-};
-
#define SKD_ASSERT(expr) \
do { \
if (unlikely(!(expr))) { \
@@ -73,17 +53,11 @@ enum {
} while (0)
#define DRV_NAME "skd"
-#define DRV_VERSION "2.2.1"
-#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
-#define DRV_BIN_VERSION 0x100
-#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
-MODULE_AUTHOR("bug-reports: support@stec-inc.com");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
-MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001
@@ -96,34 +70,32 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_PAUSE_TIMEOUT (5 * 1000)
#define SKD_N_FITMSG_BYTES (512u)
+#define SKD_MAX_REQ_PER_MSG 14
-#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
/* SG elements are 32 bytes, so we can make this 4096 and still be under the
* 128KB limit. That allows 4096*4K = 16M xfer size
*/
#define SKD_N_SG_PER_REQ_DEFAULT 256u
-#define SKD_N_SG_PER_SPECIAL 256u
#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)
#define SKD_N_INTERNAL_BYTES (512u)
+#define SKD_SKCOMP_SIZE \
+ ((sizeof(struct fit_completion_entry_v1) + \
+ sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
+
/* 5 bits of uniquifier, 0xF800 */
-#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
-#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
-#define SKD_N_TIMEOUT_SLOT 4u
-#define SKD_TIMEOUT_SLOT_MASK 3u
-
#define SKD_N_MAX_SECTORS 2048u
#define SKD_MAX_RETRIES 2u
@@ -141,7 +113,6 @@ enum skd_drvr_state {
SKD_DRVR_STATE_ONLINE,
SKD_DRVR_STATE_PAUSING,
SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_DRAINING_TIMEOUT,
SKD_DRVR_STATE_RESTARTING,
SKD_DRVR_STATE_RESUMING,
SKD_DRVR_STATE_STOPPING,
@@ -158,7 +129,6 @@ enum skd_drvr_state {
#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
-#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u
@@ -169,12 +139,6 @@ enum skd_req_state {
SKD_REQ_STATE_BUSY,
SKD_REQ_STATE_COMPLETED,
SKD_REQ_STATE_TIMEOUT,
- SKD_REQ_STATE_ABORTED,
-};
-
-enum skd_fit_msg_state {
- SKD_MSG_STATE_IDLE,
- SKD_MSG_STATE_BUSY,
};
enum skd_check_status_action {
@@ -185,34 +149,29 @@ enum skd_check_status_action {
SKD_CHECK_STATUS_BUSY_IMMINENT,
};
-struct skd_fitmsg_context {
- enum skd_fit_msg_state state;
-
- struct skd_fitmsg_context *next;
+struct skd_msg_buf {
+ struct fit_msg_hdr fmh;
+ struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
+};
+struct skd_fitmsg_context {
u32 id;
- u16 outstanding;
u32 length;
- u32 offset;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
struct skd_request_context {
enum skd_req_state state;
- struct skd_request_context *next;
-
u16 id;
u32 fitmsg_id;
- struct request *req;
u8 flush_cmd;
- u32 timeout_stamp;
- u8 sg_data_dir;
+ enum dma_data_direction data_dir;
struct scatterlist *sg;
u32 n_sg;
u32 sg_byte_count;
@@ -224,38 +183,19 @@ struct skd_request_context {
struct fit_comp_error_info err_info;
+ blk_status_t status;
};
-#define SKD_DATA_DIR_HOST_TO_CARD 1
-#define SKD_DATA_DIR_CARD_TO_HOST 2
struct skd_special_context {
struct skd_request_context req;
- u8 orphaned;
-
void *data_buf;
dma_addr_t db_dma_address;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
-struct skd_sg_io {
- fmode_t mode;
- void __user *argp;
-
- struct sg_io_hdr sg;
-
- u8 cdb[16];
-
- u32 dxfer_len;
- u32 iovcnt;
- struct sg_iovec *iov;
- struct sg_iovec no_iov_iov;
-
- struct skd_special_context *skspcl;
-};
-
typedef enum skd_irq_type {
SKD_IRQ_LEGACY,
SKD_IRQ_MSI,
@@ -265,7 +205,7 @@ typedef enum skd_irq_type {
#define SKD_MAX_BARS 2
struct skd_device {
- volatile void __iomem *mem_map[SKD_MAX_BARS];
+ void __iomem *mem_map[SKD_MAX_BARS];
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
@@ -276,21 +216,20 @@ struct skd_device {
spinlock_t lock;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
struct request_queue *queue;
+ struct skd_fitmsg_context *skmsg;
struct device *class_dev;
int gendisk_on;
int sync_done;
- atomic_t device_count;
u32 devno;
u32 major;
- char name[32];
char isr_name[30];
enum skd_drvr_state state;
u32 drive_state;
- u32 in_flight;
u32 cur_max_queue_depth;
u32 queue_low_water_mark;
u32 dev_max_queue_depth;
@@ -298,27 +237,20 @@ struct skd_device {
u32 num_fitmsg_context;
u32 num_req_context;
- u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
- u32 timeout_stamp;
- struct skd_fitmsg_context *skmsg_free_list;
struct skd_fitmsg_context *skmsg_table;
- struct skd_request_context *skreq_free_list;
- struct skd_request_context *skreq_table;
-
- struct skd_special_context *skspcl_free_list;
- struct skd_special_context *skspcl_table;
-
struct skd_special_context internal_skspcl;
u32 read_cap_blocksize;
u32 read_cap_last_lba;
int read_cap_is_valid;
int inquiry_is_valid;
u8 inq_serial_num[13]; /*12 chars plus null term */
- u8 id_str[80]; /* holds a composite name (pci + sernum) */
u8 skcomp_cycle;
u32 skcomp_ix;
+ struct kmem_cache *msgbuf_cache;
+ struct kmem_cache *sglist_cache;
+ struct kmem_cache *databuf_cache;
struct fit_completion_entry_v1 *skcomp_table;
struct fit_comp_error_info *skerr_table;
dma_addr_t cq_dma_address;
@@ -329,7 +261,6 @@ struct skd_device {
u32 timer_countdown;
u32 timer_substate;
- int n_special;
int sgs_per_request;
u32 last_mtd;
@@ -343,7 +274,7 @@ struct skd_device {
u32 timo_slot;
-
+ struct work_struct start_queue;
struct work_struct completion_worker;
};
@@ -353,53 +284,32 @@ struct skd_device {
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
- u32 val;
-
- if (likely(skdev->dbg_level < 2))
- return readl(skdev->mem_map[1] + offset);
- else {
- barrier();
- val = readl(skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- return val;
- }
+ u32 val = readl(skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
+ return val;
}
static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writel(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}
static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %016llx\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writeq(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
+ val);
}
-#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
@@ -412,7 +322,7 @@ static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
"Maximum SCSI requests packed in a single message."
- " (1-14, default==1)");
+ " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
@@ -429,10 +339,10 @@ MODULE_PARM_DESC(skd_sgs_per_request,
"Maximum SG elements per block request."
" (1-4096, default==256)");
-static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+ "Maximum SCSI pass-thru at a time. IGNORED");
module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
@@ -449,9 +359,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
struct skd_special_context *skspcl);
-static void skd_request_fn(struct request_queue *rq);
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
@@ -460,19 +367,14 @@ static void skd_postop_sg_list(struct skd_device *skdev,
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
-static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
-static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event);
@@ -481,18 +383,20 @@ static void skd_log_skreq(struct skd_device *skdev,
* READ/WRITE REQUESTS
*****************************************************************************
*/
-static void skd_fail_all_pending(struct skd_device *skdev)
+static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
- struct request_queue *q = skdev->queue;
- struct request *req;
+ int *count = data;
- for (;; ) {
- req = blk_peek_request(q);
- if (req == NULL)
- break;
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
- }
+	(*count)++;
+}
+
+static int skd_in_flight(struct skd_device *skdev)
+{
+ int count = 0;
+
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
+
+ return count;
}
static void
@@ -501,9 +405,9 @@ skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
unsigned count)
{
if (data_dir == READ)
- scsi_req->cdb[0] = 0x28;
+ scsi_req->cdb[0] = READ_10;
else
- scsi_req->cdb[0] = 0x2a;
+ scsi_req->cdb[0] = WRITE_10;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
@@ -522,7 +426,7 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
{
skreq->flush_cmd = 1;
- scsi_req->cdb[0] = 0x35;
+ scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = 0;
scsi_req->cdb[3] = 0;
@@ -534,307 +438,194 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void skd_request_fn_not_online(struct request_queue *q);
-
-static void skd_request_fn(struct request_queue *q)
+/*
+ * Return true if and only if all pending requests should be failed.
+ */
+static bool skd_fail_all(struct request_queue *q)
{
struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg = NULL;
- struct fit_msg_hdr *fmh = NULL;
- struct skd_request_context *skreq;
- struct request *req = NULL;
- struct skd_scsi_request *scsi_req;
- unsigned long io_flags;
- u32 lba;
- u32 count;
- int data_dir;
- u32 be_lba;
- u32 be_count;
- u64 be_dmaa;
- u64 cmdctxt;
- u32 timo_slot;
- void *cmd_ptr;
- int flush, fua;
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- skd_request_fn_not_online(q);
- return;
- }
- if (blk_queue_stopped(skdev->queue)) {
- if (skdev->skmsg_free_list == NULL ||
- skdev->skreq_free_list == NULL ||
- skdev->in_flight >= skdev->queue_low_water_mark)
- /* There is still some kind of shortage */
- return;
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
- }
+ SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
- /*
- * Stop conditions:
- * - There are no more native requests
- * - There are already the maximum number of requests in progress
- * - There are no more skd_request_context entries
- * - There are no more FIT msg buffers
+ skd_log_skdev(skdev, "req_not_online");
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_WAIT_BOOT:
+		/* In the starting case we haven't started the queue,
+		 * so we can't get here... but requests may already be
+		 * waiting for us because we have reported dev/skd0
+		 * to userspace. They'll wait forever if the connect
+		 * never completes.
+		 * What to do??? delay dev/skd0 ??
*/
- for (;; ) {
-
- flush = fua = 0;
-
- req = blk_peek_request(q);
-
- /* Are there any native requests to start? */
- if (req == NULL)
- break;
-
- lba = (u32)blk_rq_pos(req);
- count = blk_rq_sectors(req);
- data_dir = rq_data_dir(req);
- io_flags = req->cmd_flags;
-
- if (req_op(req) == REQ_OP_FLUSH)
- flush++;
-
- if (io_flags & REQ_FUA)
- fua++;
-
- pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
- "count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count, data_dir);
-
- /* At this point we know there is a request */
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ return false;
- /* Are too many requets already in progress? */
- if (skdev->in_flight >= skdev->cur_max_queue_depth) {
- pr_debug("%s:%s:%d qdepth %d, limit %d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth);
- break;
- }
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ return true;
+ }
+}
- /* Is a skd_request_context available? */
- skreq = skdev->skreq_free_list;
- if (skreq == NULL) {
- pr_debug("%s:%s:%d Out of req=%p\n",
- skdev->name, __func__, __LINE__, q);
- break;
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
-
- /* Now we check to see if we can get a fit msg */
- if (skmsg == NULL) {
- if (skdev->skmsg_free_list == NULL) {
- pr_debug("%s:%s:%d Out of msg\n",
- skdev->name, __func__, __LINE__);
- break;
- }
- }
+static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *mqd)
+{
+ struct request *const req = mqd->rq;
+ struct request_queue *const q = req->q;
+ struct skd_device *skdev = q->queuedata;
+ struct skd_fitmsg_context *skmsg;
+ struct fit_msg_hdr *fmh;
+ const u32 tag = blk_mq_unique_tag(req);
+ struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
+ struct skd_scsi_request *scsi_req;
+ unsigned long flags = 0;
+ const u32 lba = blk_rq_pos(req);
+ const u32 count = blk_rq_sectors(req);
+ const int data_dir = rq_data_dir(req);
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
+ if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
+ return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
- /*
- * OK to now dequeue request from q.
- *
- * At this point we are comitted to either start or reject
- * the native request. Note that skd_request_context is
- * available but is still at the head of the free list.
- */
- blk_start_request(req);
- skreq->req = req;
- skreq->fitmsg_id = 0;
-
- /* Either a FIT msg is in progress or we have to start one. */
- if (skmsg == NULL) {
- /* Are there any FIT msg buffers available? */
- skmsg = skdev->skmsg_free_list;
- if (skmsg == NULL) {
- pr_debug("%s:%s:%d Out of msg skdev=%p\n",
- skdev->name, __func__, __LINE__,
- skdev);
- break;
- }
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+ blk_mq_start_request(req);
- skdev->skmsg_free_list = skmsg->next;
+	WARN_ONCE(tag >= skd_max_queue_depth, "%#x >= %#x (nr_requests = %lu)\n",
+ tag, skd_max_queue_depth, q->nr_requests);
- skmsg->state = SKD_MSG_STATE_BUSY;
- skmsg->id += SKD_ID_INCR;
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- /* Initialize the FIT msg header */
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- }
+ dev_dbg(&skdev->pdev->dev,
+ "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
+ lba, count, count, data_dir);
- skreq->fitmsg_id = skmsg->id;
+ skreq->id = tag + SKD_ID_RW_REQUEST;
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
- /*
- * Note that a FIT msg may have just been started
- * but contains no SoFIT requests yet.
- */
+ skreq->fitmsg_id = 0;
- /*
- * Transcode the request, checking as we go. The outcome of
- * the transcoding is represented by the error variable.
- */
- cmd_ptr = &skmsg->msg_buf[skmsg->length];
- memset(cmd_ptr, 0, 32);
+ skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- be_lba = cpu_to_be32(lba);
- be_count = cpu_to_be32(count);
- be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
- cmdctxt = skreq->id + SKD_ID_INCR;
+ if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
+ dev_dbg(&skdev->pdev->dev, "error Out\n");
+ skreq->status = BLK_STS_RESOURCE;
+ blk_mq_complete_request(req);
+ return BLK_STS_OK;
+ }
- scsi_req = cmd_ptr;
- scsi_req->hdr.tag = cmdctxt;
- scsi_req->hdr.sg_list_dma_address = be_dmaa;
+ dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
+ skreq->n_sg *
+ sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
- if (data_dir == READ)
- skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
- else
- skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+ /* Either a FIT msg is in progress or we have to start one. */
+ if (skd_max_req_per_msg == 1) {
+ skmsg = NULL;
+ } else {
+ spin_lock_irqsave(&skdev->lock, flags);
+ skmsg = skdev->skmsg;
+ }
+ if (!skmsg) {
+ skmsg = &skdev->skmsg_table[tag];
+ skdev->skmsg = skmsg;
- if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
+ /* Initialize the FIT msg header */
+ fmh = &skmsg->msg_buf->fmh;
+ memset(fmh, 0, sizeof(*fmh));
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ skmsg->length = sizeof(*fmh);
+ } else {
+ fmh = &skmsg->msg_buf->fmh;
+ }
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
+ skreq->fitmsg_id = skmsg->id;
- if (fua)
- scsi_req->cdb[1] |= SKD_FUA_NV;
+ scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
+ memset(scsi_req, 0, sizeof(*scsi_req));
- if (!req->bio)
- goto skip_sg;
+ scsi_req->hdr.tag = skreq->id;
+ scsi_req->hdr.sg_list_dma_address =
+ cpu_to_be64(skreq->sksg_dma_address);
- if (!skd_preop_sg_list(skdev, skreq)) {
- /*
- * Complete the native request with error.
- * Note that the request context is still at the
- * head of the free list, and that the SoFIT request
- * was encoded into the FIT msg buffer but the FIT
- * msg length has not been updated. In short, the
- * only resource that has been allocated but might
- * not be used is that the FIT msg could be empty.
- */
- pr_debug("%s:%s:%d error Out\n",
- skdev->name, __func__, __LINE__);
- skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
- continue;
- }
+ if (req_op(req) == REQ_OP_FLUSH) {
+ skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+ SKD_ASSERT(skreq->flush_cmd == 1);
+ } else {
+ skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+ }
-skip_sg:
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skreq->sg_byte_count);
+ if (req->cmd_flags & REQ_FUA)
+ scsi_req->cdb[1] |= SKD_FUA_NV;
- /* Complete resource allocations. */
- skdev->skreq_free_list = skreq->next;
- skreq->state = SKD_REQ_STATE_BUSY;
- skreq->id += SKD_ID_INCR;
+ scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
+ /* Complete resource allocations. */
+ skreq->state = SKD_REQ_STATE_BUSY;
- /*
- * Update the active request counts.
- * Capture the timeout timestamp.
- */
- skreq->timeout_stamp = skdev->timeout_stamp;
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- skdev->timeout_slot[timo_slot]++;
- skdev->in_flight++;
- pr_debug("%s:%s:%d req=0x%x busy=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skdev->in_flight);
+ skmsg->length += sizeof(struct skd_scsi_request);
+ fmh->num_protocol_cmds_coalesced++;
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skmsg->length >= SKD_N_FITMSG_BYTES ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skmsg = NULL;
- fmh = NULL;
- }
- }
+ dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
+ skd_in_flight(skdev));
/*
- * Is a FIT msg in progress? If it is empty put the buffer back
- * on the free list. If it is non-empty send what we got.
- * This minimizes latency when there are fewer requests than
- * what fits in a FIT msg.
+ * If the FIT msg buffer is full send it.
*/
- if (skmsg != NULL) {
- /* Bigger than just a FIT msg header? */
- if (skmsg->length > sizeof(struct fit_msg_hdr)) {
- pr_debug("%s:%s:%d sending msg=%p, len %d\n",
- skdev->name, __func__, __LINE__,
- skmsg, skmsg->length);
+ if (skd_max_req_per_msg == 1) {
+ skd_send_fitmsg(skdev, skmsg);
+ } else {
+ if (mqd->last ||
+ fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
skd_send_fitmsg(skdev, skmsg);
- } else {
- /*
- * The FIT msg is empty. It means we got started
- * on the msg, but the requests were rejected.
- */
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
+ skdev->skmsg = NULL;
}
- skmsg = NULL;
- fmh = NULL;
+ spin_unlock_irqrestore(&skdev->lock, flags);
}
- /*
- * If req is non-NULL it means there is something to do but
- * we are out of a resource.
- */
- if (req)
- blk_stop_queue(skdev->queue);
+ return BLK_STS_OK;
}
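
/*
 * Illustrative sketch, not part of this patch: under blk-mq the driver's
 * per-request context ("pdu") is allocated by the block layer directly
 * behind struct request, which is what makes the old free-list and
 * SKD_ID_INCR bookkeeping removed above unnecessary. The two directions
 * of that mapping look like this:
 */
static inline struct skd_request_context *skd_rq_to_skreq(struct request *req)
{
	/* pdu memory sits immediately after the request allocation */
	return blk_mq_rq_to_pdu(req);
}

static inline struct request *skd_skreq_to_rq(struct skd_request_context *skreq)
{
	/* inverse of the above; valid only for blk-mq-allocated pdus */
	return blk_mq_rq_from_pdu(skreq);
}
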
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t error)
+static enum blk_eh_timer_return skd_timed_out(struct request *req,
+ bool reserved)
{
- if (unlikely(error)) {
- struct request *req = skreq->req;
- char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
- skd_name(skdev), cmd, lba, count, skreq->id);
- } else
- pr_debug("%s:%s:%d id=0x%x error=%d\n",
- skdev->name, __func__, __LINE__, skreq->id, error);
+ struct skd_device *skdev = req->q->queuedata;
+
+ dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
+ blk_mq_unique_tag(req));
- __blk_end_request_all(skreq->req, error);
+ return BLK_EH_RESET_TIMER;
+}
+
+static void skd_complete_rq(struct request *req)
+{
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
+
+ blk_mq_end_request(req, skreq->status);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- struct request *req = skreq->req;
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- struct scatterlist *sg = &skreq->sg[0];
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ struct scatterlist *sgl = &skreq->sg[0], *sg;
int n_sg;
int i;
skreq->sg_byte_count = 0;
- /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
- skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+ WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
+ skreq->data_dir != DMA_FROM_DEVICE);
- n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+ n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
if (n_sg <= 0)
return false;
@@ -842,7 +633,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
- n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+ n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -850,10 +641,10 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->n_sg = n_sg;
- for (i = 0; i < n_sg; i++) {
+ for_each_sg(sgl, sg, n_sg, i) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(&sg[i]);
- uint64_t dma_addr = sg_dma_address(&sg[i]);
+ u32 cnt = sg_dma_len(sg);
+ uint64_t dma_addr = sg_dma_address(sg);
sgd->control = FIT_SGD_CONTROL_NOT_LAST;
sgd->byte_count = cnt;
@@ -866,16 +657,16 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
if (unlikely(skdev->dbg_level > 1)) {
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
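
/*
 * The for_each_sg() idiom adopted above, in isolation. Unlike direct
 * sg[i] indexing it advances via sg_next() and therefore also copes with
 * chained scatterlists (a sketch, not driver code):
 */
static u32 skd_mapped_bytes(struct scatterlist *sgl, int n_sg)
{
	struct scatterlist *sg;
	u32 bytes = 0;
	int i;

	for_each_sg(sgl, sg, n_sg, i)
		bytes += sg_dma_len(sg);	/* DMA length of each mapped entry */

	return bytes;
}
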
@@ -885,9 +676,6 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
static void skd_postop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-
/*
* restore the next ptr for next IO request so we
* don't have to set it every time.
@@ -895,51 +683,7 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
-}
-
-static void skd_request_fn_not_online(struct request_queue *q)
-{
- struct skd_device *skdev = q->queuedata;
- int error;
-
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
-
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* In case of starting, we haven't started the queue,
- * so we can't get here... but requests are
- * possibly hanging out waiting for us because we
- * reported the dev/skd0 already. They'll wait
- * forever if connect doesn't complete.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return;
-
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- error = -EIO;
- break;
- }
-
- /* If we get here, terminate all pending block requeusts
- * with EIO and any scsi pass thru with appropriate sense
- */
-
- skd_fail_all_pending(skdev);
+ pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
/*
@@ -950,12 +694,22 @@ static void skd_request_fn_not_online(struct request_queue *q)
static void skd_timer_tick_not_online(struct skd_device *skdev);
+static void skd_start_queue(struct work_struct *work)
+{
+ struct skd_device *skdev = container_of(work, typeof(*skdev),
+ start_queue);
+
+ /*
+ * Although it is safe to call blk_start_queue() from interrupt
+ * context, blk_mq_start_hw_queues() must not be called from
+ * interrupt context.
+ */
+ blk_mq_start_hw_queues(skdev->queue);
+}
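
/*
 * Assumed one-time setup for the work item used above; the actual init
 * site (presumably the device constructor) is outside this hunk:
 */
static void skd_init_start_queue_work(struct skd_device *skdev)
{
	INIT_WORK(&skdev->start_queue, skd_start_queue);
}
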
+
static void skd_timer_tick(ulong arg)
{
struct skd_device *skdev = (struct skd_device *)arg;
-
- u32 timo_slot;
- u32 overdue_timestamp;
unsigned long reqflags;
u32 state;
@@ -972,37 +726,9 @@ static void skd_timer_tick(ulong arg)
if (state != skdev->drive_state)
skd_isr_fwstate(skdev);
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ if (skdev->state != SKD_DRVR_STATE_ONLINE)
skd_timer_tick_not_online(skdev);
- goto timer_func_out;
- }
- skdev->timeout_stamp++;
- timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
-
- /*
- * All requests that happened during the previous use of
- * this slot should be done by now. The previous use was
- * over 7 seconds ago.
- */
- if (skdev->timeout_slot[timo_slot] == 0)
- goto timer_func_out;
-
- /* Something is overdue */
- overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
-
- pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_slot[timo_slot], skdev->in_flight);
- pr_err("(%s): Overdue IOs (%d), busy %d\n",
- skd_name(skdev), skdev->timeout_slot[timo_slot],
- skdev->in_flight);
- skdev->timer_countdown = SKD_DRAINING_TIMO;
- skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
- skdev->timo_slot = timo_slot;
- blk_stop_queue(skdev->queue);
-
-timer_func_out:
mod_timer(&skdev->timer, (jiffies + HZ));
spin_unlock_irqrestore(&skdev->lock, reqflags);
@@ -1015,9 +741,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_LOAD:
break;
case SKD_DRVR_STATE_BUSY_SANITIZE:
- pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
- skdev->name, __func__, __LINE__,
- skdev->drive_state, skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "drive busy sanitize[%x], driver[%x]\n",
+ skdev->drive_state, skdev->state);
/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
*/
@@ -1025,22 +751,21 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
skdev->timer_countdown--;
return;
}
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
break;
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
case SKD_DRVR_STATE_BUSY_ERASE:
- pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
+ skdev->state, skdev->timer_countdown);
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
return;
}
- pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev,
+ "busy[%x], timedout=%d, restarting device.",
+ skdev->state, skdev->timer_countdown);
skd_restart_device(skdev);
break;
@@ -1054,12 +779,12 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
	 * recover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Connect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
+ skdev->drive_state);
		/* start the queue so we can respond with error to requests */
		/* wake up anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1072,29 +797,6 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_PAUSED:
break;
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- pr_debug("%s:%s:%d "
- "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
- skdev->name, __func__, __LINE__,
- skdev->timo_slot,
- skdev->timer_countdown,
- skdev->in_flight,
- skdev->timeout_slot[skdev->timo_slot]);
- /* if the slot has cleared we can let the I/O continue */
- if (skdev->timeout_slot[skdev->timo_slot] == 0) {
- pr_debug("%s:%s:%d Slot drained, starting queue.\n",
- skdev->name, __func__, __LINE__);
- skdev->state = SKD_DRVR_STATE_ONLINE;
- blk_start_queue(skdev->queue);
- return;
- }
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_restart_device(skdev);
- break;
-
case SKD_DRVR_STATE_RESTARTING:
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
@@ -1103,8 +805,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev,
+ "DriveFault Reconnect Timeout (%x)\n",
+ skdev->drive_state);
/*
* Recovering does two things:
@@ -1124,18 +827,18 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* It never came out of soft reset. Try to
* recover the requests and then let them
* fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
else {
- pr_err("(%s): Disable BusMaster (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
+ skdev->drive_state);
pci_disable_device(skdev->pdev);
skd_disable_interrupts(skdev);
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
}
	/* start the queue so we can respond with error to requests */
	/* wake up anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1154,13 +857,11 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- init_timer(&skdev->timer);
setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
- pr_err("%s: failed to start timer %d\n",
- __func__, rc);
+ dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
return rc;
}
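
/*
 * setup_timer() both initializes the timer and installs its callback,
 * which is why the separate init_timer() call dropped above was
 * redundant. The whole arming sequence, equivalently:
 */
static void skd_arm_timer(struct skd_device *skdev)
{
	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
	mod_timer(&skdev->timer, jiffies + HZ);	/* first tick in ~1 s */
}
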
@@ -1171,634 +872,6 @@ static void skd_kill_timer(struct skd_device *skdev)
/*
*****************************************************************************
- * IOCTL
- *****************************************************************************
- */
-static int skd_ioctl_sg_io(struct skd_device *skdev,
- fmode_t mode, void __user *argp);
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir);
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl);
-
-static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
- uint cmd_in, ulong arg)
-{
- static const int sg_version_num = 30527;
- int rc = 0, timeout;
- struct gendisk *disk = bdev->bd_disk;
- struct skd_device *skdev = disk->private_data;
- int __user *p = (int __user *)arg;
-
- pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
- skdev->name, __func__, __LINE__,
- disk->disk_name, current->comm, mode, cmd_in, arg);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (cmd_in) {
- case SG_SET_TIMEOUT:
- rc = get_user(timeout, p);
- if (!rc)
- disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
- break;
- case SG_GET_TIMEOUT:
- rc = jiffies_to_clock_t(disk->queue->sg_timeout);
- break;
- case SG_GET_VERSION_NUM:
- rc = put_user(sg_version_num, p);
- break;
- case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
- break;
-
- default:
- rc = -ENOTTY;
- break;
- }
-
- pr_debug("%s:%s:%d %s: completion rc %d\n",
- skdev->name, __func__, __LINE__, disk->disk_name, rc);
- return rc;
-}
-
-static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
- void __user *argp)
-{
- int rc;
- struct skd_sg_io sksgio;
-
- memset(&sksgio, 0, sizeof(sksgio));
- sksgio.mode = mode;
- sksgio.argp = argp;
- sksgio.iov = &sksgio.no_iov_iov;
-
- switch (skdev->state) {
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- break;
-
- default:
- pr_debug("%s:%s:%d drive not online\n",
- skdev->name, __func__, __LINE__);
- rc = -ENXIO;
- goto out;
- }
-
- rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_prep_buffering(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_await(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_put_status(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = 0;
-
-out:
- skd_sg_io_release_skspcl(skdev, &sksgio);
-
- if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
- kfree(sksgio.iov);
- return rc;
-}
-
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- int i, acc;
-
- if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d access sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
- pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
- skdev->name, __func__, __LINE__, sgp->interface_id);
- return -EINVAL;
- }
-
- if (sgp->cmd_len > sizeof(sksgio->cdb)) {
- pr_debug("%s:%s:%d cmd_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->cmd_len);
- return -EINVAL;
- }
-
- if (sgp->iovec_count > 256) {
- pr_debug("%s:%s:%d iovec_count invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->iovec_count);
- return -EINVAL;
- }
-
- if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
- pr_debug("%s:%s:%d dxfer_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_len);
- return -EINVAL;
- }
-
- switch (sgp->dxfer_direction) {
- case SG_DXFER_NONE:
- acc = -1;
- break;
-
- case SG_DXFER_TO_DEV:
- acc = VERIFY_READ;
- break;
-
- case SG_DXFER_FROM_DEV:
- case SG_DXFER_TO_FROM_DEV:
- acc = VERIFY_WRITE;
- break;
-
- default:
- pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_direction);
- return -EINVAL;
- }
-
- if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
- pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->cmdp);
- return -EFAULT;
- }
-
- if (sgp->mx_sb_len != 0) {
- if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
- pr_debug("%s:%s:%d access sbp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->sbp);
- return -EFAULT;
- }
- }
-
- if (sgp->iovec_count == 0) {
- sksgio->iov[0].iov_base = sgp->dxferp;
- sksgio->iov[0].iov_len = sgp->dxfer_len;
- sksgio->iovcnt = 1;
- sksgio->dxfer_len = sgp->dxfer_len;
- } else {
- struct sg_iovec *iov;
- uint nbytes = sizeof(*iov) * sgp->iovec_count;
- size_t iov_data_len;
-
- iov = kmalloc(nbytes, GFP_KERNEL);
- if (iov == NULL) {
- pr_debug("%s:%s:%d alloc iovec failed %d\n",
- skdev->name, __func__, __LINE__,
- sgp->iovec_count);
- return -ENOMEM;
- }
- sksgio->iov = iov;
- sksgio->iovcnt = sgp->iovec_count;
-
- if (copy_from_user(iov, sgp->dxferp, nbytes)) {
- pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
- skdev->name, __func__, __LINE__, sgp->dxferp);
- return -EFAULT;
- }
-
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov_data_len = 0;
- for (i = 0; i < sgp->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len)
- return -EINVAL;
- iov_data_len += iov[i].iov_len;
- }
-
- /* SG_IO howto says that the shorter of the two wins */
- if (sgp->dxfer_len < iov_data_len) {
- sksgio->iovcnt = iov_shorten((struct iovec *)iov,
- sgp->iovec_count,
- sgp->dxfer_len);
- sksgio->dxfer_len = sgp->dxfer_len;
- } else
- sksgio->dxfer_len = iov_data_len;
- }
-
- if (sgp->dxfer_direction != SG_DXFER_NONE) {
- struct sg_iovec *iov = sksgio->iov;
- for (i = 0; i < sksgio->iovcnt; i++, iov++) {
- if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
- pr_debug("%s:%s:%d access data failed %p/%d\n",
- skdev->name, __func__, __LINE__,
- iov->iov_base, (int)iov->iov_len);
- return -EFAULT;
- }
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = NULL;
- int rc;
-
- for (;;) {
- ulong flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skspcl = skdev->skspcl_free_list;
- if (skspcl != NULL) {
- skdev->skspcl_free_list =
- (struct skd_special_context *)skspcl->req.next;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.state = SKD_REQ_STATE_SETUP;
- skspcl->orphaned = 0;
- skspcl->req.n_sg = 0;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- if (skspcl != NULL) {
- rc = 0;
- break;
- }
-
- pr_debug("%s:%s:%d blocking\n",
- skdev->name, __func__, __LINE__);
-
- rc = wait_event_interruptible_timeout(
- skdev->waitq,
- (skdev->skspcl_free_list != NULL),
- msecs_to_jiffies(sksgio->sg.timeout));
-
- pr_debug("%s:%s:%d unblocking, rc=%d\n",
- skdev->name, __func__, __LINE__, rc);
-
- if (rc <= 0) {
- if (rc == 0)
- rc = -ETIMEDOUT;
- else
- rc = -EINTR;
- break;
- }
- /*
- * If we get here rc > 0 meaning the timeout to
- * wait_event_interruptible_timeout() had time left, hence the
- * sought event -- non-empty free list -- happened.
- * Retry the allocation.
- */
- }
- sksgio->skspcl = skspcl;
-
- return rc;
-}
-
-static int skd_skreq_prep_buffering(struct skd_device *skdev,
- struct skd_request_context *skreq,
- u32 dxfer_len)
-{
- u32 resid = dxfer_len;
-
- /*
- * The DMA engine must have aligned addresses and byte counts.
- */
- resid += (-resid) & 3;
- skreq->sg_byte_count = resid;
-
- skreq->n_sg = 0;
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
- u32 ix = skreq->n_sg;
- struct scatterlist *sg = &skreq->sg[ix];
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
- struct page *page;
-
- if (nbytes > resid)
- nbytes = resid;
-
- page = alloc_page(GFP_KERNEL);
- if (page == NULL)
- return -ENOMEM;
-
- sg_set_page(sg, page, nbytes, 0);
-
- /* TODO: This should be going through a pci_???()
- * routine to do proper mapping. */
- sksg->control = FIT_SGD_CONTROL_NOT_LAST;
- sksg->byte_count = nbytes;
-
- sksg->host_side_addr = sg_phys(sg);
-
- sksg->dev_side_addr = 0;
- sksg->next_desc_ptr = skreq->sksg_dma_address +
- (ix + 1) * sizeof(*sksg);
-
- skreq->n_sg++;
- resid -= nbytes;
- }
-
- if (skreq->n_sg > 0) {
- u32 ix = skreq->n_sg - 1;
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
-
- sksg->control = FIT_SGD_CONTROL_LAST;
- sksg->next_desc_ptr = 0;
- }
-
- if (unlikely(skdev->dbg_level > 1)) {
- u32 i;
-
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < skreq->n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct skd_request_context *skreq = &skspcl->req;
- u32 dxfer_len = sksgio->dxfer_len;
- int rc;
-
- rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
- /*
- * Eventually, errors or not, skd_release_special() is called
- * to recover allocations including partial allocations.
- */
- return rc;
-}
-
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- u32 iov_ix = 0;
- struct sg_iovec curiov;
- u32 sksg_ix = 0;
- u8 *bufp = NULL;
- u32 buf_len = 0;
- u32 resid = sksgio->dxfer_len;
- int rc;
-
- curiov.iov_len = 0;
- curiov.iov_base = NULL;
-
- if (dxfer_dir != sksgio->sg.dxfer_direction) {
- if (dxfer_dir != SG_DXFER_TO_DEV ||
- sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
- return 0;
- }
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
-
- if (curiov.iov_len == 0) {
- curiov = sksgio->iov[iov_ix++];
- continue;
- }
-
- if (buf_len == 0) {
- struct page *page;
- page = sg_page(&skspcl->req.sg[sksg_ix++]);
- bufp = page_address(page);
- buf_len = PAGE_SIZE;
- }
-
- nbytes = min_t(u32, nbytes, resid);
- nbytes = min_t(u32, nbytes, curiov.iov_len);
- nbytes = min_t(u32, nbytes, buf_len);
-
- if (dxfer_dir == SG_DXFER_TO_DEV)
- rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
- else
- rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
-
- if (rc)
- return -EFAULT;
-
- resid -= nbytes;
- curiov.iov_len -= nbytes;
- curiov.iov_base += nbytes;
- buf_len -= nbytes;
- }
-
- return 0;
-}
-
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
-
- /* Initialize the FIT msg header */
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
-
- /* Initialize the SCSI request */
- if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skspcl->req.sksg_dma_address);
- scsi_req->hdr.tag = skspcl->req.id;
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skspcl->req.sg_byte_count);
- memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
-
- skspcl->req.state = SKD_REQ_STATE_BUSY;
- skd_send_special_fitmsg(skdev, skspcl);
-
- return 0;
-}
-
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
-{
- unsigned long flags;
- int rc;
-
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (sksgio->skspcl->req.state !=
- SKD_REQ_STATE_BUSY),
- msecs_to_jiffies(sksgio->sg.
- timeout));
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d skspcl %p aborted\n",
- skdev->name, __func__, __LINE__, sksgio->skspcl);
-
- /* Build check cond, sense and let command finish. */
- /* For a timeout, we must fabricate completion and sense
- * data to complete the command */
- sksgio->skspcl->req.completion.status =
- SAM_STAT_CHECK_CONDITION;
-
- memset(&sksgio->skspcl->req.err_info, 0,
- sizeof(sksgio->skspcl->req.err_info));
- sksgio->skspcl->req.err_info.type = 0x70;
- sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
- sksgio->skspcl->req.err_info.code = 0x44;
- sksgio->skspcl->req.err_info.qual = 0;
- rc = 0;
- } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
- /* No longer on the adapter. We finish. */
- rc = 0;
- else {
- /* Something's gone wrong. Still busy. Timeout or
- * user interrupted (control-C). Mark as an orphan
- * so it will be disposed when completed. */
- sksgio->skspcl->orphaned = 1;
- sksgio->skspcl = NULL;
- if (rc == 0) {
- pr_debug("%s:%s:%d timed out %p (%u ms)\n",
- skdev->name, __func__, __LINE__,
- sksgio, sksgio->sg.timeout);
- rc = -ETIMEDOUT;
- } else {
- pr_debug("%s:%s:%d cntlc %p\n",
- skdev->name, __func__, __LINE__, sksgio);
- rc = -EINTR;
- }
- }
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- return rc;
-}
-
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- struct skd_special_context *skspcl = sksgio->skspcl;
- int resid = 0;
-
- u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
-
- sgp->status = skspcl->req.completion.status;
- resid = sksgio->dxfer_len - nb;
-
- sgp->masked_status = sgp->status & STATUS_MASK;
- sgp->msg_status = 0;
- sgp->host_status = 0;
- sgp->driver_status = 0;
- sgp->resid = resid;
- if (sgp->masked_status || sgp->host_status || sgp->driver_status)
- sgp->info |= SG_INFO_CHECK;
-
- pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
- skdev->name, __func__, __LINE__,
- sgp->status, sgp->masked_status, sgp->resid);
-
- if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
- if (sgp->mx_sb_len > 0) {
- struct fit_comp_error_info *ei = &skspcl->req.err_info;
- u32 nbytes = sizeof(*ei);
-
- nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
-
- sgp->sb_len_wr = nbytes;
-
- if (__copy_to_user(sgp->sbp, ei, nbytes)) {
- pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
- skdev->name, __func__, __LINE__,
- sgp->sbp);
- return -EFAULT;
- }
- }
- }
-
- if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
-
- if (skspcl != NULL) {
- ulong flags;
-
- sksgio->skspcl = NULL;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skd_release_special(skdev, skspcl);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
-
- return 0;
-}
-
-/*
- *****************************************************************************
* INTERNAL REQUESTS -- generated by driver itself
*****************************************************************************
*/
@@ -1811,14 +884,15 @@ static int skd_format_internal_skspcl(struct skd_device *skdev)
uint64_t dma_address;
struct skd_scsi_request *scsi;
- fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+ fmh = &skspcl->msg_buf->fmh;
fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
fmh->num_protocol_cmds_coalesced = 1;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
memset(scsi, 0, sizeof(*scsi));
dma_address = skspcl->req.sksg_dma_address;
scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+ skspcl->req.n_sg = 1;
sgd->control = FIT_SGD_CONTROL_LAST;
sgd->byte_count = 0;
sgd->host_side_addr = skspcl->db_dma_address;
@@ -1846,11 +920,9 @@ static void skd_send_internal_skspcl(struct skd_device *skdev,
*/
return;
- SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
skspcl->req.state = SKD_REQ_STATE_BUSY;
- skspcl->req.id += SKD_ID_INCR;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
scsi->hdr.tag = skspcl->req.id;
memset(scsi->cdb, 0, sizeof(scsi->cdb));
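
/*
 * The msg_buf->fmh and msg_buf->scsi[0] accesses above imply msg_buf is
 * now a typed structure rather than a raw byte buffer indexed at offset
 * 64. Assumed layout (the real definition lives outside this diff):
 */
struct skd_msg_buf_sketch {
	struct fit_msg_hdr	fmh;		/* one header per FIT message */
	struct skd_scsi_request	scsi[14];	/* illustrative slot count */
};
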
@@ -1940,32 +1012,35 @@ static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
/* If the check condition is of special interest, log a message */
if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
&& (code == 0x04) && (qual == 0x06)) {
- pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
- "ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), key, code, qual, fruc);
+ dev_err(&skdev->pdev->dev,
+ "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ key, code, qual, fruc);
}
}
static void skd_complete_internal(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr,
struct skd_special_context *skspcl)
{
u8 *buf = skspcl->data_buf;
u8 status;
int i;
- struct skd_scsi_request *scsi =
- (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
+
+ lockdep_assert_held(&skdev->lock);
SKD_ASSERT(skspcl == &skdev->internal_skspcl);
- pr_debug("%s:%s:%d complete internal %x\n",
- skdev->name, __func__, __LINE__, scsi->cdb[0]);
+ dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
+
+ dma_sync_single_for_cpu(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
skspcl->req.completion = *skcomp;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
status = skspcl->req.completion.status;
@@ -1981,14 +1056,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "TUR failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** TUR failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -1997,14 +1073,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "write buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** write buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2014,33 +1091,31 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl,
READ_CAPACITY);
else {
- pr_err(
- "(%s):*** W/R Buffer mismatch %d ***\n",
- skd_name(skdev), skdev->connect_retries);
+ dev_err(&skdev->pdev->dev,
+ "*** W/R Buffer mismatch %d ***\n",
+ skdev->connect_retries);
if (skdev->connect_retries <
SKD_MAX_CONNECT_RETRIES) {
skdev->connect_retries++;
skd_soft_reset(skdev);
} else {
- pr_err(
- "(%s): W/R Buffer Connect Error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev,
+ "W/R Buffer Connect Error\n");
return;
}
}
} else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d "
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "read buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d "
- "**** read buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** read buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2054,10 +1129,9 @@ static void skd_complete_internal(struct skd_device *skdev,
(buf[4] << 24) | (buf[5] << 16) |
(buf[6] << 8) | buf[7];
- pr_debug("%s:%s:%d last lba %d, bs %d\n",
- skdev->name, __func__, __LINE__,
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
+ dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
+ skdev->read_cap_last_lba,
+ skdev->read_cap_blocksize);
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
@@ -2068,13 +1142,10 @@ static void skd_complete_internal(struct skd_device *skdev,
(skerr->key == MEDIUM_ERROR)) {
skdev->read_cap_last_lba = ~0;
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- pr_debug("%s:%s:%d "
- "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
} else {
- pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
skd_send_internal_skspcl(skdev, skspcl,
TEST_UNIT_READY);
}
@@ -2091,8 +1162,7 @@ static void skd_complete_internal(struct skd_device *skdev,
}
if (skd_unquiesce_dev(skdev) < 0)
- pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
- skdev->name, __func__, __LINE__);
+		dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
/* connection is complete */
skdev->connect_retries = 0;
break;
@@ -2120,27 +1190,20 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg)
{
u64 qcmd;
- struct fit_msg_hdr *fmh;
- pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
- skdev->name, __func__, __LINE__,
- skmsg->mb_dma_address, skdev->in_flight);
- pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
- skdev->name, __func__, __LINE__,
- skmsg->msg_buf, skmsg->offset);
+ dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
+ skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL;
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
-
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2160,6 +1223,12 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
+ skmsg->length, DMA_TO_DEVICE);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
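
/*
 * The sync / barrier / doorbell ordering used above, in schematic form
 * (a sketch of the pattern, not driver code):
 */
static void skd_ring_doorbell(struct skd_device *skdev, u64 qcmd,
			      dma_addr_t buf, size_t len)
{
	/* 1. flush the message buffer to the device's view of memory */
	dma_sync_single_for_device(&skdev->pdev->dev, buf, len, DMA_TO_DEVICE);
	/* 2. order the buffer writes before the doorbell write */
	smp_wmb();
	/* 3. only now let the hardware fetch the message */
	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
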
@@ -2168,30 +1237,31 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
+ WARN_ON_ONCE(skspcl->req.n_sg != 1);
+
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skspcl->msg_buf;
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
- pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ skspcl, skspcl->req.id, skspcl->req.sksg_list,
+ skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
@@ -2202,6 +1272,20 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
+ SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->req.sksg_dma_address,
+ 1 * sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2212,8 +1296,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
*/
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr);
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr);
struct sns_info {
u8 type;
@@ -2262,21 +1346,20 @@ static struct sns_info skd_chkstat_table[] = {
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
- u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+ u8 cmp_status, struct fit_comp_error_info *skerr)
{
- int i, n;
+ int i;
- pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
+ dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual, skerr->fruc);
- pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skdev->name, __func__, __LINE__, skerr->type, cmp_status,
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
+ dev_dbg(&skdev->pdev->dev,
+ "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+ skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
+ skerr->fruc);
/* Does the info match an entry in the good category? */
- n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
struct sns_info *sns = &skd_chkstat_table[i];
if (sns->mask & 0x10)
@@ -2300,10 +1383,9 @@ skd_check_status(struct skd_device *skdev,
continue;
if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- pr_err("(%s): SMART Alert: sense key/asc/ascq "
- "%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key,
- skerr->code, skerr->qual);
+ dev_err(&skdev->pdev->dev,
+ "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual);
}
return sns->action;
}
@@ -2312,335 +1394,80 @@ skd_check_status(struct skd_device *skdev,
* zero status means good
*/
if (cmp_status) {
- pr_debug("%s:%s:%d status check: error\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check: error\n");
return SKD_CHECK_STATUS_REPORT_ERROR;
}
- pr_debug("%s:%s:%d status check good default\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check good default\n");
return SKD_CHECK_STATUS_REPORT_GOOD;
}
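
/*
 * ARRAY_SIZE() replaces the open-coded sizeof division above; the idiom
 * in isolation:
 */
static size_t skd_chkstat_entries(void)
{
	return ARRAY_SIZE(skd_chkstat_table);	/* element count, not bytes */
}
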
static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq)
+ struct skd_request_context *skreq,
+ struct request *req)
{
u8 cmp_status = skreq->completion.status;
switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, BLK_STS_OK);
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(req);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, skreq->req);
- pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+ blk_requeue_request(skdev->queue, req);
+ dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
skd_quiesce_dev(skdev);
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, skreq->req);
+ blk_requeue_request(skdev->queue, req);
break;
}
- /* fall through to report error */
+ /* fall through */
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
break;
}
}
-/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- u32 msg_slot;
- struct skd_fitmsg_context *skmsg;
-
- u32 timo_slot;
-
- /*
- * Reclaim the FIT msg buffer if this is
- * the first of the requests it carried to
- * be completed. The FIT msg buffer used to
- * send this request cannot be reused until
- * we are sure the s1120 card has copied
- * it to its memory. The FIT msg might have
- * contained several requests. As soon as
- * any of them are completed we know that
- * the entire FIT msg was transferred.
- * Only the first completed request will
- * match the FIT msg buffer id. The FIT
- * msg buffer id is immediately updated.
- * When subsequent requests complete the FIT
- * msg buffer id won't match, so we know
- * quite cheaply that it is already done.
- */
- msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
- SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
-
- skmsg = &skdev->skmsg_table[msg_slot];
- if (skmsg->id == skreq->fitmsg_id) {
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
- SKD_ASSERT(skmsg->outstanding > 0);
- skmsg->outstanding--;
- if (skmsg->outstanding == 0) {
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- }
-
- /*
- * Decrease the number of active requests.
- * Also decrements the count in the timeout slot.
- */
- SKD_ASSERT(skdev->in_flight > 0);
- skdev->in_flight -= 1;
-
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
- skdev->timeout_slot[timo_slot] -= 1;
-
- /*
- * Reset backpointer
- */
- skreq->req = NULL;
-
/*
* Reclaim the skd_request_context
*/
skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- skreq->next = skdev->skreq_free_list;
- skdev->skreq_free_list = skreq;
}
-#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
-
-static void skd_do_inq_page_00(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
-
- /* Caller requested "supported pages". The driver needs to insert
- * its page.
- */
- pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
- skdev->name, __func__, __LINE__);
-
- /* If the device rejected the request because the CDB was
- * improperly formed, then just leave.
- */
- if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
- skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
- return;
-
- /* Get the amount of space the caller allocated */
- max_bytes = (cdb[3] << 8) | cdb[4];
-
- /* Get the number of pages actually returned by the device */
- drive_pages = (buf[2] << 8) | buf[3];
- drive_bytes = drive_pages + 4;
- new_size = drive_pages + 1;
-
- /* Supported pages must be in numerical order, so find where
- * the driver page needs to be inserted into the list of
- * pages returned by the device.
- */
- for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
- if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
- return; /* Device using this page code. abort */
- else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
- break;
- }
-
- if (insert_pt < max_bytes) {
- uint16_t u;
-
- /* Shift everything up one byte to make room. */
- for (u = new_size + 3; u > insert_pt; u--)
- buf[u] = buf[u - 1];
- buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
-
- /* SCSI byte order increment of num_returned_bytes by 1 */
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes) + 1;
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes);
- }
-
- /* update page length field to reflect the driver's page too */
- buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
- buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
-}
-
-static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
-{
- int pcie_reg;
- u16 pci_bus_speed;
- u8 pci_lanes;
-
- pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
- u16 linksta;
- pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
-
- pci_bus_speed = linksta & 0xF;
- pci_lanes = (linksta & 0x3F0) >> 4;
- } else {
- *speed = STEC_LINK_UNKNOWN;
- *width = 0xFF;
- return;
- }
-
- switch (pci_bus_speed) {
- case 1:
- *speed = STEC_LINK_2_5GTS;
- break;
- case 2:
- *speed = STEC_LINK_5GTS;
- break;
- case 3:
- *speed = STEC_LINK_8GTS;
- break;
- default:
- *speed = STEC_LINK_UNKNOWN;
- break;
- }
-
- if (pci_lanes <= 0x20)
- *width = pci_lanes;
- else
- *width = 0xFF;
-}
-
-static void skd_do_inq_page_da(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- struct pci_dev *pdev = skdev->pdev;
- unsigned max_bytes;
- struct driver_inquiry_data inq;
- u16 val;
-
- pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
- skdev->name, __func__, __LINE__);
-
- memset(&inq, 0, sizeof(inq));
-
- inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
-
- skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
- inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
- inq.pcie_device_number = PCI_SLOT(pdev->devfn);
- inq.pcie_function_number = PCI_FUNC(pdev->devfn);
-
- pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
- inq.pcie_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
- inq.pcie_device_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
- inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
- inq.pcie_subsystem_device_id = cpu_to_be16(val);
-
- /* Driver version, fixed lenth, padded with spaces on the right */
- inq.driver_version_length = sizeof(inq.driver_version);
- memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
- memcpy(inq.driver_version, DRV_VER_COMPL,
- min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
-
- inq.page_length = cpu_to_be16((sizeof(inq) - 4));
-
- /* Clear the error set by the device */
- skcomp->status = SAM_STAT_GOOD;
- memset((void *)skerr, 0, sizeof(*skerr));
-
- /* copy response into output buffer */
- max_bytes = (cdb[3] << 8) | cdb[4];
- memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
-
- skcomp->num_returned_bytes =
- be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
-}
-
-static void skd_do_driver_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- if (!buf)
- return;
- else if (cdb[0] != INQUIRY)
- return; /* Not an INQUIRY */
- else if ((cdb[1] & 1) == 0)
- return; /* EVPD not set */
- else if (cdb[2] == 0)
- /* Need to add driver's page to supported pages list */
- skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
- else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
- /* Caller requested driver's page */
- skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
-}
-
-static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
-{
- if (!sg)
- return NULL;
- if (!sg_page(sg))
- return NULL;
- return sg_virt(sg);
-}
-
-static void skd_process_scsi_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- uint8_t *buf;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
- skspcl->req.sg_data_dir);
- buf = skd_sg_1st_page_ptr(skspcl->req.sg);
-
- if (buf)
- skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
-}
-
-
static int skd_isr_completion_posted(struct skd_device *skdev,
int limit, int *enqueued)
{
- volatile struct fit_completion_entry_v1 *skcmp = NULL;
- volatile struct fit_comp_error_info *skerr;
+ struct fit_completion_entry_v1 *skcmp;
+ struct fit_comp_error_info *skerr;
u16 req_id;
- u32 req_slot;
+ u32 tag;
+ u16 hwq = 0;
+ struct request *rq;
struct skd_request_context *skreq;
- u16 cmp_cntxt = 0;
- u8 cmp_status = 0;
- u8 cmp_cycle = 0;
- u32 cmp_bytes = 0;
+ u16 cmp_cntxt;
+ u8 cmp_status;
+ u8 cmp_cycle;
+ u32 cmp_bytes;
int rc = 0;
int processed = 0;
+ lockdep_assert_held(&skdev->lock);
+
for (;; ) {
SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
@@ -2652,16 +1479,14 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
skerr = &skdev->skerr_table[skdev->skcomp_ix];
- pr_debug("%s:%s:%d "
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
- "busy=%d rbytes=0x%x proto=%d\n",
- skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
- skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
- skdev->in_flight, cmp_bytes, skdev->proto_ver);
+ dev_dbg(&skdev->pdev->dev,
+ "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
+ cmp_cntxt, cmp_status, skd_in_flight(skdev),
+ cmp_bytes, skdev->proto_ver);
if (cmp_cycle != skdev->skcomp_cycle) {
- pr_debug("%s:%s:%d end of completions\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "end of completions\n");
break;
}
/*
@@ -2680,49 +1505,38 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
* r/w request (see skd_start() above) or a special request.
*/
req_id = cmp_cntxt;
- req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+ tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
/* Is this other than a r/w request? */
- if (req_slot >= skdev->num_req_context) {
+ if (tag >= skdev->num_req_context) {
/*
* This is not a completion for a r/w request.
*/
+ WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
+ tag));
skd_complete_other(skdev, skcmp, skerr);
continue;
}
- skreq = &skdev->skreq_table[req_slot];
+ rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
+ if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
+ tag))
+ continue;
+ skreq = blk_mq_rq_to_pdu(rq);
/*
* Make sure the request ID for the slot matches.
*/
if (skreq->id != req_id) {
- pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- req_id, skreq->id);
- {
- u16 new_id = cmp_cntxt;
- pr_err("(%s): Completion mismatch "
- "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- skd_name(skdev), req_id,
- skreq->id, new_id);
+ dev_err(&skdev->pdev->dev,
+ "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+ req_id, skreq->id, cmp_cntxt);
- continue;
- }
+ continue;
}
SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
- if (skreq->state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id);
- /* a previously timed out command can
- * now be cleaned up */
- skd_release_skreq(skdev, skreq);
- continue;
- }
-
skreq->completion = *skcmp;
if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
skreq->err_info = *skerr;
@@ -2734,27 +1548,17 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (!skreq->req) {
- pr_debug("%s:%s:%d NULL backptr skdreq %p, "
- "req=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id, req_id);
- } else {
- /*
- * Capture the outcome and post it back to the
- * native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, BLK_STS_OK);
- else
- skd_resolve_req_exception(skdev, skreq);
- }
+ skd_release_skreq(skdev, skreq);
/*
- * Release the skreq, its FIT msg (if one), timeout slot,
- * and queue depth.
+ * Capture the outcome and post it back to the native request.
*/
- skd_release_skreq(skdev, skreq);
+ if (likely(cmp_status == SAM_STAT_GOOD)) {
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(rq);
+ } else {
+ skd_resolve_req_exception(skdev, skreq, rq);
+ }
/* skd_isr_comp_limit equal zero means no limit */
if (limit) {
@@ -2765,8 +1569,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
}
- if ((skdev->state == SKD_DRVR_STATE_PAUSING)
- && (skdev->in_flight) == 0) {
+ if (skdev->state == SKD_DRVR_STATE_PAUSING &&
+ skd_in_flight(skdev) == 0) {
skdev->state = SKD_DRVR_STATE_PAUSED;
wake_up_interruptible(&skdev->waitq);
}
@@ -2775,21 +1579,22 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr)
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr)
{
u32 req_id = 0;
u32 req_table;
u32 req_slot;
struct skd_special_context *skspcl;
+ lockdep_assert_held(&skdev->lock);
+
req_id = skcomp->tag;
req_table = req_id & SKD_ID_TABLE_MASK;
req_slot = req_id & SKD_ID_SLOT_MASK;
- pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
- skdev->name, __func__, __LINE__,
- req_table, req_id, req_slot);
+ dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
+ req_id, req_slot);
/*
* Based on the request id, determine how to dispatch this completion.
@@ -2799,28 +1604,12 @@ static void skd_complete_other(struct skd_device *skdev,
switch (req_table) {
case SKD_ID_RW_REQUEST:
/*
- * The caller, skd_completion_posted_isr() above,
+ * The caller, skd_isr_completion_posted() above,
* handles r/w requests. The only way we get here
* is if the req_slot is out of bounds.
*/
break;
- case SKD_ID_SPECIAL_REQUEST:
- /*
- * Make sure the req_slot is in bounds and that the id
- * matches.
- */
- if (req_slot < skdev->n_special) {
- skspcl = &skdev->skspcl_table[req_slot];
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_special(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
-
case SKD_ID_INTERNAL:
if (req_slot == 0) {
skspcl = &skdev->internal_skspcl;
@@ -2851,72 +1640,9 @@ static void skd_complete_other(struct skd_device *skdev,
*/
}
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- pr_debug("%s:%s:%d completing special request %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- if (skspcl->orphaned) {
- /* Discard orphaned request */
- /* ?: Can this release directly or does it need
- * to use a worker? */
- pr_debug("%s:%s:%d release orphaned %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- skd_release_special(skdev, skspcl);
- return;
- }
-
- skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
-
- skspcl->req.state = SKD_REQ_STATE_COMPLETED;
- skspcl->req.completion = *skcomp;
- skspcl->req.err_info = *skerr;
-
- skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
- skerr->code, skerr->qual, skerr->fruc);
-
- wake_up_interruptible(&skdev->waitq);
-}
-
-/* assume spinlock is already held */
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- int i, was_depleted;
-
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct page *page = sg_page(&skspcl->req.sg[i]);
- __free_page(page);
- }
-
- was_depleted = (skdev->skspcl_free_list == NULL);
-
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.next =
- (struct skd_request_context *)skdev->skspcl_free_list;
- skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
-
- if (was_depleted) {
- pr_debug("%s:%s:%d skspcl was depleted\n",
- skdev->name, __func__, __LINE__);
- /* Free list was depleted. Their might be waiters. */
- wake_up_interruptible(&skdev->waitq);
- }
-}
-
static void skd_reset_skcomp(struct skd_device *skdev)
{
- u32 nbytes;
- struct fit_completion_entry_v1 *skcomp;
-
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
-
- memset(skdev->skcomp_table, 0, nbytes);
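+	/* SKD_SKCOMP_SIZE covers both the completion and the error-info arrays. */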
+ memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
skdev->skcomp_ix = 0;
skdev->skcomp_cycle = 1;
@@ -2941,7 +1667,7 @@ static void skd_completion_worker(struct work_struct *work)
* process everything in compq
*/
skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -2951,14 +1677,13 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev);
static irqreturn_t
skd_isr(int irq, void *ptr)
{
- struct skd_device *skdev;
+ struct skd_device *skdev = ptr;
u32 intstat;
u32 ack;
int rc = 0;
int deferred = 0;
int flush_enqueued = 0;
- skdev = (struct skd_device *)ptr;
spin_lock(&skdev->lock);
-	for (;; ) {
+	for (;;) {
@@ -2967,8 +1692,8 @@ skd_isr(int irq, void *ptr)
ack = FIT_INT_DEF_MASK;
ack &= intstat;
- pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
- skdev->name, __func__, __LINE__, intstat, ack);
+ dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
+ ack);
/* As long as there is an int pending on device, keep
* running loop. When none, get out, but if we've never
@@ -3018,12 +1743,12 @@ skd_isr(int irq, void *ptr)
}
if (unlikely(flush_enqueued))
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock(&skdev->lock);
@@ -3033,13 +1758,13 @@ skd_isr(int irq, void *ptr)
static void skd_drive_fault(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}
static void skd_drive_disappeared(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
@@ -3052,10 +1777,9 @@ static void skd_isr_fwstate(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
state = sense & FIT_SR_DRIVE_STATE_MASK;
- pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
+ dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_drive_state_to_str(state), state);
skdev->drive_state = state;
@@ -3066,7 +1790,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
break;
}
if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
skdev->timer_countdown = SKD_STARTING_TIMO;
skdev->state = SKD_DRVR_STATE_STARTING;
@@ -3087,11 +1811,11 @@ static void skd_isr_fwstate(struct skd_device *skdev)
skdev->cur_max_queue_depth * 2 / 3 + 1;
if (skdev->queue_low_water_mark < 1)
skdev->queue_low_water_mark = 1;
- pr_info(
- "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
- skd_name(skdev),
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_info(&skdev->pdev->dev,
+ "Queue depth limit=%d dev=%d lowat=%d\n",
+ skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth,
+ skdev->queue_low_water_mark);
skd_refresh_device_data(skdev);
break;
@@ -3107,7 +1831,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
break;
case FIT_SR_DRIVE_BUSY_ERASE:
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
@@ -3128,8 +1852,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
}
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
@@ -3141,17 +1864,17 @@ static void skd_isr_fwstate(struct skd_device *skdev)
case FIT_SR_DRIVE_FAULT:
skd_drive_fault(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
/* PCIe bus returned all Fs? */
case 0xFF:
- pr_info("(%s): state=0x%x sense=0x%x\n",
- skd_name(skdev), state, sense);
+ dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
+ sense);
skd_drive_disappeared(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
default:
/*
@@ -3159,92 +1882,33 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
break;
}
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
}
-static void skd_recover_requests(struct skd_device *skdev, int requeue)
+static void skd_recover_request(struct request *req, void *data, bool reserved)
{
- int i;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq = &skdev->skreq_table[i];
-
- if (skreq->state == SKD_REQ_STATE_BUSY) {
- skd_log_skreq(skdev, skreq, "recover");
-
- SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- SKD_ASSERT(skreq->req != NULL);
-
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- if (requeue &&
- (unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES)
- blk_requeue_request(skdev->queue, skreq->req);
- else
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
-
- skreq->req = NULL;
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- }
- if (i > 0)
- skreq[-1].next = skreq;
- skreq->next = NULL;
- }
- skdev->skreq_free_list = skdev->skreq_table;
+ struct skd_device *const skdev = data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+ if (skreq->state != SKD_REQ_STATE_BUSY)
+ return;
- if (skmsg->state == SKD_MSG_STATE_BUSY) {
- skd_log_skmsg(skdev, skmsg, "salvaged");
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- }
- if (i > 0)
- skmsg[-1].next = skmsg;
- skmsg->next = NULL;
- }
- skdev->skmsg_free_list = skdev->skmsg_table;
+ skd_log_skreq(skdev, skreq, "recover");
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
- /* If orphaned, reclaim it because it has already been reported
- * to the process as an error (it was just waiting for
- * a completion that didn't come, and now it will never come)
- * If busy, change to a state that will cause it to error
- * out in the wait routine and let it do the normal
- * reporting and reclaiming
- */
- if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
- if (skspcl->orphaned) {
- pr_debug("%s:%s:%d orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skd_release_special(skdev, skspcl);
- } else {
- pr_debug("%s:%s:%d not orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skspcl->req.state = SKD_REQ_STATE_ABORTED;
- }
- }
- }
- skdev->skspcl_free_list = skdev->skspcl_table;
-
- for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
- skdev->timeout_slot[i] = 0;
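+	/* Fail the request; skd_complete_rq() is assumed to end it with skreq->status. */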
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+}
- skdev->in_flight = 0;
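+/* Assumed to be called with skdev->lock held, as from skd_isr_fwstate(). */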
+static void skd_recover_requests(struct skd_device *skdev)
+{
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev)
@@ -3255,8 +1919,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
- skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+ dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
+ skdev->last_mtd);
/* ignore any mtd that is an ack for something we didn't send */
if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
@@ -3267,13 +1931,10 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- pr_err("(%s): protocol mismatch\n",
- skdev->name);
- pr_err("(%s): got=%d support=%d\n",
- skdev->name, skdev->proto_ver,
- FIT_PROTOCOL_VERSION_1);
- pr_err("(%s): please upgrade driver\n",
- skdev->name);
+ dev_err(&skdev->pdev->dev, "protocol mismatch\n");
+ dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
+ skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
+ dev_err(&skdev->pdev->dev, " please upgrade driver\n");
skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
skd_soft_reset(skdev);
break;
@@ -3327,9 +1988,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
skdev->last_mtd = mtd;
- pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
- skd_name(skdev),
- skdev->connect_time_stamp, skdev->drive_jiffies);
+ dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
+ skdev->connect_time_stamp, skdev->drive_jiffies);
break;
case FIT_MTD_ARM_QUEUE:
@@ -3351,8 +2011,7 @@ static void skd_disable_interrupts(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_CONTROL);
sense &= ~FIT_CR_ENABLE_INTERRUPTS;
SKD_WRITEL(skdev, sense, FIT_CONTROL);
- pr_debug("%s:%s:%d sense 0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
-	/* Note that the 1s is written. A 1-bit means
+	/* Note that all 1s are written. A 1-bit means
* disable, a 0 means enable.
@@ -3371,13 +2030,11 @@ static void skd_enable_interrupts(struct skd_device *skdev)
-	/* Note that the compliment of mask is written. A 1-bit means
+	/* Note that the complement of mask is written. A 1-bit means
* disable, a 0 means enable. */
SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d interrupt mask=0x%x\n",
- skdev->name, __func__, __LINE__, ~val);
+ dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
val = SKD_READL(skdev, FIT_CONTROL);
val |= FIT_CR_ENABLE_INTERRUPTS;
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3393,8 +2050,7 @@ static void skd_soft_reset(struct skd_device *skdev)
val = SKD_READL(skdev, FIT_CONTROL);
val |= (FIT_CR_SOFT_RESET);
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3411,8 +2067,7 @@ static void skd_start_device(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d initial status=0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
state = sense & FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3425,25 +2080,23 @@ static void skd_start_device(struct skd_device *skdev)
switch (skdev->drive_state) {
case FIT_SR_DRIVE_OFFLINE:
- pr_err("(%s): Drive offline...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive offline...\n");
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
case FIT_SR_DRIVE_BUSY_SANITIZE:
- pr_info("(%s): Start: BUSY_SANITIZE\n",
- skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_BUSY_ERASE:
- pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
@@ -3454,14 +2107,13 @@ static void skd_start_device(struct skd_device *skdev)
break;
case FIT_SR_DRIVE_BUSY:
- pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive Busy...\n");
skdev->state = SKD_DRVR_STATE_BUSY;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_SOFT_RESET:
- pr_err("(%s) drive soft reset in prog\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
break;
case FIT_SR_DRIVE_FAULT:
@@ -3471,9 +2123,8 @@ static void skd_start_device(struct skd_device *skdev)
*/
skd_drive_fault(skdev);
-		/*start the queue so we can respond with error to requests */
+		/* start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -3483,38 +2134,33 @@ static void skd_start_device(struct skd_device *skdev)
* to the BAR1 addresses. */
skd_drive_disappeared(skdev);
-		/*start the queue so we can respond with error to requests */
+		/* start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev,
+ "starting queue to error-out reqs\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
default:
- pr_err("(%s) Start: unknown state %x\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
+ skdev->drive_state);
break;
}
state = SKD_READL(skdev, FIT_CONTROL);
- pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- pr_debug("%s:%s:%d Intr Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d Intr Mask=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
state = SKD_READL(skdev, FIT_HW_VERSION);
- pr_debug("%s:%s:%d HW version=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -3529,14 +2175,12 @@ static void skd_stop_device(struct skd_device *skdev)
spin_lock_irqsave(&skdev->lock, flags);
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- pr_err("(%s): skd_stop_device not online no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
goto stop_out;
}
if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- pr_err("(%s): skd_stop_device no special\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
goto stop_out;
}
@@ -3554,16 +2198,13 @@ static void skd_stop_device(struct skd_device *skdev)
switch (skdev->sync_done) {
case 0:
- pr_err("(%s): skd_stop_device no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
break;
case 1:
- pr_err("(%s): skd_stop_device sync done\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
break;
default:
- pr_err("(%s): skd_stop_device sync error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
}
stop_out:
@@ -3593,8 +2234,8 @@ stop_out:
}
if (dev_state != FIT_SR_DRIVE_INIT)
- pr_err("(%s): skd_stop_device state error 0x%02x\n",
- skd_name(skdev), dev_state);
+ dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
+ dev_state);
}
/* assume spinlock is held */
@@ -3607,8 +2248,7 @@ static void skd_restart_device(struct skd_device *skdev)
state = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d drive status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
state &= FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3628,9 +2268,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
switch (skdev->state) {
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
break;
case SKD_DRVR_STATE_ONLINE:
case SKD_DRVR_STATE_STOPPING:
@@ -3642,8 +2281,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_RESUMING:
default:
rc = -EINVAL;
- pr_debug("%s:%s:%d state [%d] not implemented\n",
- skdev->name, __func__, __LINE__, skdev->state);
+ dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
+ skdev->state);
}
return rc;
}
@@ -3655,8 +2294,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
skd_log_skdev(skdev, "unquiesce");
if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- pr_debug("%s:%s:%d **** device already ONLINE\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
return 0;
}
if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
@@ -3669,8 +2307,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
* to become available.
*/
skdev->state = SKD_DRVR_STATE_BUSY;
- pr_debug("%s:%s:%d drive BUSY state\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
return 0;
}
@@ -3689,26 +2326,24 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_IDLE:
case SKD_DRVR_STATE_LOAD:
skdev->state = SKD_DRVR_STATE_ONLINE;
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
- skdev->name, __func__, __LINE__);
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
- blk_start_queue(skdev->queue);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state),
+ prev_driver_state, skd_skdev_state_to_str(skdev->state),
+ skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** device ONLINE...starting block queue\n");
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = 1;
wake_up_interruptible(&skdev->waitq);
break;
case SKD_DRVR_STATE_DISAPPEARED:
default:
- pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** driver state %d, not implemented\n",
+ skdev->state);
return -EBUSY;
}
return 0;
@@ -3726,11 +2361,10 @@ static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
- irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3742,9 +2376,8 @@ static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
skd_isr_fwstate(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3759,19 +2392,18 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
int deferred;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
&flush_enqueued);
if (flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3784,9 +2416,8 @@ static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
skd_isr_msg_from_dev(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3799,9 +2430,8 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3850,8 +2480,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
PCI_IRQ_MSIX);
if (rc < 0) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
goto out;
}
@@ -3859,8 +2488,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
- pr_err("(%s): msix table allocation error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "msix table allocation error\n");
goto out;
}
@@ -3877,16 +2505,15 @@ static int skd_acquire_msix(struct skd_device *skdev)
msix_entries[i].handler, 0,
qentry->isr_name, skdev);
if (rc) {
- pr_err("(%s): Unable to register(%d) MSI-X "
- "handler %d: %s\n",
- skd_name(skdev), rc, i, qentry->isr_name);
+ dev_err(&skdev->pdev->dev,
+ "Unable to register(%d) MSI-X handler %d: %s\n",
+ rc, i, qentry->isr_name);
goto msix_out;
}
}
- pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
+ dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
+ SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
@@ -3909,8 +2536,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
if (!rc)
return 0;
- pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to enable MSI-X, re-trying with MSI %d\n", rc);
}
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
@@ -3920,8 +2547,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
irq_flag |= PCI_IRQ_MSI;
rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
if (rc < 0) {
- pr_err("(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to allocate the MSI interrupt %d\n", rc);
return rc;
}
@@ -3930,8 +2557,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
skdev->isr_name, skdev);
if (rc) {
pci_free_irq_vectors(pdev);
- pr_err("(%s): failed to allocate interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
+ rc);
return rc;
}
@@ -3965,20 +2592,45 @@ static void skd_release_irq(struct skd_device *skdev)
*****************************************************************************
*/
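+/*
+ * DMA helpers: allocate a fixed-size buffer from a kmem_cache and map it
+ * for streaming DMA; skd_free_dma() unmaps and frees it again.
+ */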
+static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ enum dma_data_direction dir)
+{
+ struct device *dev = &skdev->pdev->dev;
+ void *buf;
+
+ buf = kmem_cache_alloc(s, gfp);
+ if (!buf)
+ return NULL;
+ *dma_handle = dma_map_single(dev, buf, s->size, dir);
+ if (dma_mapping_error(dev, *dma_handle)) {
+		kmem_cache_free(s, buf);
+ buf = NULL;
+ }
+ return buf;
+}
+
+static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
+ void *vaddr, dma_addr_t dma_handle,
+ enum dma_data_direction dir)
+{
+ if (!vaddr)
+ return;
+
+ dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
+ kmem_cache_free(s, vaddr);
+}
+
static int skd_cons_skcomp(struct skd_device *skdev)
{
int rc = 0;
struct fit_completion_entry_v1 *skcomp;
- u32 nbytes;
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+ dev_dbg(&skdev->pdev->dev,
+ "comp pci_alloc, total bytes %zd entries %d\n",
+ SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
- skdev->name, __func__, __LINE__,
- nbytes, SKD_N_COMPLETION_ENTRY);
-
- skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
+ skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
&skdev->cq_dma_address);
if (skcomp == NULL) {
@@ -4000,14 +2652,14 @@ static int skd_cons_skmsg(struct skd_device *skdev)
int rc = 0;
u32 i;
- pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_fitmsg_context),
- skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+ dev_dbg(&skdev->pdev->dev,
+ "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
+ sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
- skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
- *skdev->num_fitmsg_context, GFP_KERNEL);
+ skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context),
+ GFP_KERNEL);
if (skdev->skmsg_table == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4020,9 +2672,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->state = SKD_MSG_STATE_IDLE;
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES + 64,
+ SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address);
if (skmsg->msg_buf == NULL) {
@@ -4030,22 +2681,13 @@ static int skd_cons_skmsg(struct skd_device *skdev)
goto err_out;
}
- skmsg->offset = (u32)((u64)skmsg->msg_buf &
- (~FIT_QCMD_BASE_ADDRESS_MASK));
- skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
- FIT_QCMD_BASE_ADDRESS_MASK);
- skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+ WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
+ (FIT_QCMD_ALIGN - 1),
+ "not aligned: msg_buf %p mb_dma_address %#llx\n",
+ skmsg->msg_buf, skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
-
- skmsg->next = &skmsg[1];
}
- /* Free list is in order starting with the 0th entry. */
- skdev->skmsg_table[i - 1].next = NULL;
- skdev->skmsg_free_list = skdev->skmsg_table;
-
err_out:
return rc;
}
@@ -4055,18 +2697,14 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
dma_addr_t *ret_dma_addr)
{
struct fit_sg_descriptor *sg_list;
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
- sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+ sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (sg_list != NULL) {
uint64_t dma_address = *ret_dma_addr;
u32 i;
- memset(sg_list, 0, nbytes);
-
for (i = 0; i < n_sg - 1; i++) {
uint64_t ndp_off;
ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
@@ -4079,153 +2717,63 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
return sg_list;
}
-static int skd_cons_skreq(struct skd_device *skdev)
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ dma_addr_t dma_addr)
{
- int rc = 0;
- u32 i;
-
- pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_request_context),
- skdev->num_req_context,
- sizeof(struct skd_request_context) * skdev->num_req_context);
-
- skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
- * skdev->num_req_context, GFP_KERNEL);
- if (skdev->skreq_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
- skdev->name, __func__, __LINE__,
- skdev->sgs_per_request, sizeof(struct scatterlist),
- skdev->sgs_per_request * sizeof(struct scatterlist));
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skreq->id = i + SKD_ID_RW_REQUEST;
- skreq->state = SKD_REQ_STATE_IDLE;
-
- skreq->sg = kzalloc(sizeof(struct scatterlist) *
- skdev->sgs_per_request, GFP_KERNEL);
- if (skreq->sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- sg_init_table(skreq->sg, skdev->sgs_per_request);
-
- skreq->sksg_list = skd_cons_sg_list(skdev,
- skdev->sgs_per_request,
- &skreq->sksg_dma_address);
-
- if (skreq->sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skreq->next = &skreq[1];
- }
-
- /* Free list is in order starting with the 0th entry. */
- skdev->skreq_table[i - 1].next = NULL;
- skdev->skreq_free_list = skdev->skreq_table;
+ if (WARN_ON_ONCE(!sg_list))
+ return;
-err_out:
- return rc;
+ skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
+ DMA_TO_DEVICE);
}
-static int skd_cons_skspcl(struct skd_device *skdev)
+static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- int rc = 0;
- u32 i, nbytes;
-
- pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_special_context),
- skdev->n_special,
- sizeof(struct skd_special_context) * skdev->n_special);
-
- skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
- * skdev->n_special, GFP_KERNEL);
- if (skdev->skspcl_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- skspcl->req.next = &skspcl[1].req;
-
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
-
- skspcl->msg_buf =
- pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
- SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
- if (skspcl->req.sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sksg_list = skd_cons_sg_list(skdev,
- SKD_N_SG_PER_SPECIAL,
- &skspcl->req.
- sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->sg = (void *)(skreq + 1);
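+	/* The scatterlist lives in the PDU tail reserved via tag_set.cmd_size. */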
+ sg_init_table(skreq->sg, skd_sgs_per_request);
+ skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
+ &skreq->sksg_dma_address);
- /* Free list is in order starting with the 0th entry. */
- skdev->skspcl_table[i - 1].req.next = NULL;
- skdev->skspcl_free_list = skdev->skspcl_table;
+ return skreq->sksg_list ? 0 : -ENOMEM;
+}
- return rc;
+static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
+{
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-err_out:
- return rc;
+ skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}
static int skd_cons_sksb(struct skd_device *skdev)
{
int rc = 0;
struct skd_special_context *skspcl;
- u32 nbytes;
skspcl = &skdev->internal_skspcl;
skspcl->req.id = 0 + SKD_ID_INTERNAL;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- nbytes = SKD_N_INTERNAL_BYTES;
-
- skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->db_dma_address);
+ skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
+ &skspcl->db_dma_address,
+ GFP_DMA | __GFP_ZERO,
+ DMA_BIDIRECTIONAL);
if (skspcl->data_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
+ skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
+ &skspcl->mb_dma_address,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (skspcl->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4247,6 +2795,14 @@ err_out:
return rc;
}
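+/* blk-mq dispatch, completion and timeout hooks; one hardware queue is used. */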
+static const struct blk_mq_ops skd_mq_ops = {
+ .queue_rq = skd_mq_queue_rq,
+ .complete = skd_complete_rq,
+ .timeout = skd_timed_out,
+ .init_request = skd_init_request,
+ .exit_request = skd_exit_request,
+};
+
static int skd_cons_disk(struct skd_device *skdev)
{
int rc = 0;
@@ -4268,31 +2824,46 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->fops = &skd_blockdev_ops;
disk->private_data = skdev;
- q = blk_init_queue(skd_request_fn, &skdev->lock);
- if (!q) {
- rc = -ENOMEM;
+ memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
+ skdev->tag_set.ops = &skd_mq_ops;
+ skdev->tag_set.nr_hw_queues = 1;
+ skdev->tag_set.queue_depth = skd_max_queue_depth;
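+	/* Per-request PDU: a skd_request_context followed by its scatterlist. */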
+ skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
+ skdev->sgs_per_request * sizeof(struct scatterlist);
+ skdev->tag_set.numa_node = NUMA_NO_NODE;
+ skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE |
+ BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
+ skdev->tag_set.driver_data = skdev;
+ rc = blk_mq_alloc_tag_set(&skdev->tag_set);
+ if (rc)
+ goto err_out;
+ q = blk_mq_init_queue(&skdev->tag_set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(&skdev->tag_set);
+ rc = PTR_ERR(q);
goto err_out;
}
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+ q->queuedata = skdev;
skdev->queue = q;
disk->queue = q;
- q->queuedata = skdev;
blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
- /* set sysfs ptimal_io_size to 8K */
+ /* set optimal I/O size to 8KB */
blk_queue_io_opt(q, 8192);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
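+	/* Requests older than 8 seconds trigger the .timeout handler. */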
+ blk_queue_rq_timeout(q, 8 * HZ);
+
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);
err_out:
@@ -4306,13 +2877,13 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
{
struct skd_device *skdev;
int blk_major = skd_major;
+ size_t size;
int rc;
skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
if (!skdev) {
- pr_err(PFX "(%s): memory alloc failure\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "memory alloc failure\n");
return NULL;
}
@@ -4320,60 +2891,71 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
skdev->num_req_context = skd_max_queue_depth;
skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->n_special = skd_max_pass_thru;
skdev->cur_max_queue_depth = 1;
skdev->queue_low_water_mark = 1;
skdev->proto_ver = 99;
skdev->sgs_per_request = skd_sgs_per_request;
skdev->dbg_level = skd_dbg_level;
- atomic_set(&skdev->device_count, 0);
-
spin_lock_init(&skdev->lock);
+ INIT_WORK(&skdev->start_queue, skd_start_queue);
INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
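+	/* One cache sized for the larger of the two FIT message formats. */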
+ size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
+ skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->msgbuf_cache)
goto err_out;
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
+ WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
+ "skd-msgbuf: %d < %zd\n",
+ kmem_cache_size(skdev->msgbuf_cache), size);
+ size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
+ skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->sglist_cache)
+ goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
+ "skd-sglist: %d < %zd\n",
+ kmem_cache_size(skdev->sglist_cache), size);
+ size = SKD_N_INTERNAL_BYTES;
+ skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->databuf_cache)
goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
+ "skd-databuf: %d < %zd\n",
+ kmem_cache_size(skdev->databuf_cache), size);
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skreq(skdev);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
+ rc = skd_cons_skcomp(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skspcl(skdev);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
+ rc = skd_cons_skmsg(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
rc = skd_cons_sksb(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
rc = skd_cons_disk(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "VICTORY\n");
return skdev;
err_out:
- pr_debug("%s:%s:%d construct failed\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "construct failed\n");
skd_destruct(skdev);
return NULL;
}
@@ -4386,14 +2968,9 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
- if (skdev->skcomp_table != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(skdev->skcomp_table[0]) *
- SKD_N_COMPLETION_ENTRY;
- pci_free_consistent(skdev->pdev, nbytes,
+ if (skdev->skcomp_table)
+ pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
skdev->skcomp_table, skdev->cq_dma_address);
- }
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -4412,8 +2989,6 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
- skmsg->msg_buf += skmsg->offset;
- skmsg->mb_dma_address += skmsg->offset;
pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
skmsg->msg_buf,
skmsg->mb_dma_address);
@@ -4426,109 +3001,23 @@ static void skd_free_skmsg(struct skd_device *skdev)
skdev->skmsg_table = NULL;
}
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr)
-{
- if (sg_list != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
-
- pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
- }
-}
-
-static void skd_free_skreq(struct skd_device *skdev)
-{
- u32 i;
-
- if (skdev->skreq_table == NULL)
- return;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skd_free_sg_list(skdev, skreq->sksg_list,
- skdev->sgs_per_request,
- skreq->sksg_dma_address);
-
- skreq->sksg_list = NULL;
- skreq->sksg_dma_address = 0;
-
- kfree(skreq->sg);
- }
-
- kfree(skdev->skreq_table);
- skdev->skreq_table = NULL;
-}
-
-static void skd_free_skspcl(struct skd_device *skdev)
-{
- u32 i;
- u32 nbytes;
-
- if (skdev->skspcl_table == NULL)
- return;
-
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf,
- skspcl->mb_dma_address);
- }
-
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
-
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- SKD_N_SG_PER_SPECIAL,
- skspcl->req.sksg_dma_address);
-
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
-
- kfree(skspcl->req.sg);
- }
-
- kfree(skdev->skspcl_table);
- skdev->skspcl_table = NULL;
-}
-
static void skd_free_sksb(struct skd_device *skdev)
{
- struct skd_special_context *skspcl;
- u32 nbytes;
-
- skspcl = &skdev->internal_skspcl;
-
- if (skspcl->data_buf != NULL) {
- nbytes = SKD_N_INTERNAL_BYTES;
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->data_buf, skspcl->db_dma_address);
- }
+ skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
+ skspcl->db_dma_address, DMA_BIDIRECTIONAL);
skspcl->data_buf = NULL;
skspcl->db_dma_address = 0;
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf, skspcl->mb_dma_address);
- }
+ skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
+ skspcl->mb_dma_address, DMA_TO_DEVICE);
skspcl->msg_buf = NULL;
skspcl->mb_dma_address = 0;
- skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+ skd_free_sg_list(skdev, skspcl->req.sksg_list,
skspcl->req.sksg_dma_address);
skspcl->req.sksg_list = NULL;
@@ -4539,15 +3028,20 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ if (disk)
+ disk->queue = NULL;
}
+
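+	/* Free the tag set only after the queue that references it is gone. */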
+ if (skdev->tag_set.tags)
+ blk_mq_free_tag_set(&skdev->tag_set);
+
+ put_disk(disk);
skdev->disk = NULL;
}
@@ -4556,26 +3050,25 @@ static void skd_destruct(struct skd_device *skdev)
if (skdev == NULL)
return;
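+	/* Make sure no queued start_queue work runs during teardown. */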
+ cancel_work_sync(&skdev->start_queue);
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
skd_free_disk(skdev);
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
skd_free_sksb(skdev);
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- skd_free_skspcl(skdev);
-
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- skd_free_skreq(skdev);
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
skd_free_skmsg(skdev);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
skd_free_skcomp(skdev);
- pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+ kmem_cache_destroy(skdev->databuf_cache);
+ kmem_cache_destroy(skdev->sglist_cache);
+ kmem_cache_destroy(skdev->msgbuf_cache);
+
+ dev_dbg(&skdev->pdev->dev, "skdev\n");
kfree(skdev);
}
@@ -4592,9 +3085,8 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
skdev = bdev->bd_disk->private_data;
- pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
- skdev->name, __func__, __LINE__,
- bdev->bd_disk->disk_name, current->comm);
+ dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
+ bdev->bd_disk->disk_name, current->comm);
if (skdev->read_cap_is_valid) {
capacity = get_capacity(skdev->disk);
@@ -4609,18 +3101,16 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
- pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "add_disk\n");
device_add_disk(parent, skdev->disk);
return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
.owner = THIS_MODULE,
- .ioctl = skd_bdev_ioctl,
.getgeo = skd_bdev_getgeo,
};
-
/*
*****************************************************************************
* PCIe DRIVER GLUE
@@ -4671,10 +3161,8 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char pci_str[32];
struct skd_device *skdev;
- pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
- DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
- pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
- pci_name(pdev), pdev->vendor, pdev->device);
+ dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
+ pdev->device);
rc = pci_enable_device(pdev);
if (rc)
@@ -4685,16 +3173,13 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
- (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
-
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4714,19 +3199,17 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
skd_pci_info(skdev, pci_str);
- pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+ dev_info(&pdev->dev, "%s 64bit\n", pci_str);
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err(
- "(%s): bad enable of PCIe error reporting rc=%d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
-
pci_set_drvdata(pdev, skdev);
for (i = 0; i < SKD_MAX_BARS; i++) {
@@ -4735,21 +3218,19 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev,
+ "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
- pr_err("(%s): interrupt resource error %d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4771,29 +3252,14 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
/* we timed out, something is wrong with the device,
don't add the disk structure */
- pr_err(
- "(%s): error: waiting for s1120 timed out %d!\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
+ rc);
/* in case of no error; we timeout with ENXIO */
if (!rc)
rc = -ENXIO;
goto err_out_timer;
}
-
-#ifdef SKD_VMK_POLL_HANDLER
- if (skdev->irq_type == SKD_IRQ_MSIX) {
- /* MSIX completion handler is being used for coredump */
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->msix_entries[5].vector,
- skd_comp_q, skdev);
- } else {
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->pdev->irq, skd_isr,
- skdev);
- }
-#endif /* SKD_VMK_POLL_HANDLER */
-
return rc;
err_out_timer:
@@ -4826,7 +3292,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
skd_stop_device(skdev);
@@ -4834,7 +3300,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4855,7 +3321,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -EIO;
}
@@ -4865,7 +3331,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4885,7 +3351,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -1;
}
@@ -4903,15 +3369,14 @@ static int skd_pci_resume(struct pci_dev *pdev)
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4919,8 +3384,8 @@ static int skd_pci_resume(struct pci_dev *pdev)
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
- skdev->name, rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
@@ -4932,21 +3397,17 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev, "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
-
- pr_err("(%s): interrupt resource error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4984,15 +3445,15 @@ static void skd_pci_shutdown(struct pci_dev *pdev)
{
struct skd_device *skdev;
- pr_err("skd_pci_shutdown called\n");
+ dev_err(&pdev->dev, "%s called\n", __func__);
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
- pr_err("%s: calling stop\n", skd_name(skdev));
+ dev_err(&pdev->dev, "calling stop\n");
skd_stop_device(skdev);
}
@@ -5012,21 +3473,6 @@ static struct pci_driver skd_driver = {
*****************************************************************************
*/
-static const char *skd_name(struct skd_device *skdev)
-{
- memset(skdev->id_str, 0, sizeof(skdev->id_str));
-
- if (skdev->inquiry_is_valid)
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
- skdev->name, skdev->inq_serial_num,
- pci_name(skdev->pdev));
- else
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
- skdev->name, pci_name(skdev->pdev));
-
- return skdev->id_str;
-}
-
const char *skd_drive_state_to_str(int state)
{
switch (state) {
@@ -5078,8 +3524,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
return "PAUSING";
case SKD_DRVR_STATE_PAUSED:
return "PAUSED";
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return "DRAINING_TIMEOUT";
case SKD_DRVR_STATE_RESTARTING:
return "RESTARTING";
case SKD_DRVR_STATE_RESUMING:
@@ -5106,18 +3550,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
-{
- switch (state) {
- case SKD_MSG_STATE_IDLE:
- return "IDLE";
- case SKD_MSG_STATE_BUSY:
- return "BUSY";
- default:
- return "???";
- }
-}
-
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
@@ -5131,8 +3563,6 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
return "COMPLETED";
case SKD_REQ_STATE_TIMEOUT:
return "TIMEOUT";
- case SKD_REQ_STATE_ABORTED:
- return "ABORTED";
default:
return "???";
}
@@ -5140,58 +3570,34 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
- pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skdev, event);
- pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
- skdev->name, __func__, __LINE__,
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
-}
-
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event)
-{
- pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
- skdev->name, __func__, __LINE__,
- skd_skmsg_state_to_str(skmsg->state), skmsg->state,
- skmsg->id, skmsg->length);
+ dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
+ dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
+ skd_in_flight(skdev), skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event)
{
- pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skreq, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skdev->name, __func__, __LINE__,
- skd_skreq_state_to_str(skreq->state), skreq->state,
- skreq->id, skreq->fitmsg_id);
- pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
-
- if (skreq->req != NULL) {
- struct request *req = skreq->req;
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_debug("%s:%s:%d "
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count,
- (int)rq_data_dir(req));
- } else
- pr_debug("%s:%s:%d req=NULL\n",
- skdev->name, __func__, __LINE__);
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ u32 lba = blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
+ dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+ skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
+ skreq->fitmsg_id);
+ dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
+ skreq->data_dir, skreq->n_sg);
+
+ dev_dbg(&skdev->pdev->dev,
+ "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
+ count, count, (int)rq_data_dir(req));
}
/*
@@ -5202,7 +3608,14 @@ static void skd_log_skreq(struct skd_device *skdev,
static int __init skd_init(void)
{
- pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+ BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
+ BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
+ BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
+ BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
+ BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
+ BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
switch (skd_isr_type) {
case SKD_IRQ_LEGACY:
@@ -5222,7 +3635,8 @@ static int __init skd_init(void)
skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
}
- if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+ if (skd_max_req_per_msg < 1 ||
+ skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
@@ -5246,19 +3660,11 @@ static int __init skd_init(void)
skd_isr_comp_limit = 0;
}
- if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
- pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
- skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
- skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
- }
-
return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
- pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
-
pci_unregister_driver(&skd_driver);
if (skd_major)
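
Editor's note: the BUILD_BUG_ON() checks that now open skd_init() pin the on-wire FIT structure layouts at compile time instead of trusting the removed #pragma pack. A minimal sketch of the pattern, using a hypothetical structure (names and sizes are illustration only, not part of the patch):

	struct wire_hdr {
		__be64 addr;		/* 8 bytes */
		__be32 len;		/* 4 bytes */
		__be32 tag;		/* 4 bytes */
	};

	static int __init wire_init(void)
	{
		/* build fails if the layout ever drifts from 16 bytes */
		BUILD_BUG_ON(sizeof(struct wire_hdr) != 16);
		return 0;
	}
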
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
index 61c757ff0161..de35f47e953c 100644
--- a/drivers/block/skd_s1120.h
+++ b/drivers/block/skd_s1120.h
@@ -1,19 +1,15 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Copyright 2012 STEC, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#ifndef SKD_S1120_H
#define SKD_S1120_H
-#pragma pack(push, s1120_h, 1)
-
/*
* Q-channel, 64-bit r/w
*/
@@ -30,7 +26,7 @@
#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
+#define FIT_QCMD_ALIGN L1_CACHE_BYTES
/*
* Control, 32-bit r/w
@@ -250,7 +246,7 @@ struct fit_msg_hdr {
* 20-23 of the FIT_MTD_FITFW_INIT response.
*/
struct fit_completion_entry_v1 {
- uint32_t num_returned_bytes;
+ __be32 num_returned_bytes;
uint16_t tag;
uint8_t status; /* SCSI status */
uint8_t cycle;
@@ -278,7 +274,7 @@ struct fit_comp_error_info {
uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
uint16_t uec; /* 14: Additional Sense Bytes */
- uint64_t per; /* 16: Additional Sense Bytes */
+ uint64_t per __packed; /* 16: Additional Sense Bytes */
uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
};
@@ -292,11 +288,11 @@ struct fit_comp_error_info {
* Version one has the last 32 bits sg_list_len_bytes;
*/
struct skd_command_header {
- uint64_t sg_list_dma_address;
+ __be64 sg_list_dma_address;
uint16_t tag;
uint8_t attribute;
uint8_t add_cdb_len; /* In 32 bit words */
- uint32_t sg_list_len_bytes;
+ __be32 sg_list_len_bytes;
};
struct skd_scsi_request {
@@ -309,22 +305,20 @@ struct driver_inquiry_data {
uint8_t peripheral_device_type:5;
uint8_t qualifier:3;
uint8_t page_code;
- uint16_t page_length;
- uint16_t pcie_bus_number;
+ __be16 page_length;
+ __be16 pcie_bus_number;
uint8_t pcie_device_number;
uint8_t pcie_function_number;
uint8_t pcie_link_speed;
uint8_t pcie_link_lanes;
- uint16_t pcie_vendor_id;
- uint16_t pcie_device_id;
- uint16_t pcie_subsystem_vendor_id;
- uint16_t pcie_subsystem_device_id;
+ __be16 pcie_vendor_id;
+ __be16 pcie_device_id;
+ __be16 pcie_subsystem_vendor_id;
+ __be16 pcie_subsystem_device_id;
uint8_t reserved1[2];
uint8_t reserved2[3];
uint8_t driver_version_length;
uint8_t driver_version[0x14];
};
-#pragma pack(pop, s1120_h)
-
#endif /* SKD_S1120_H */
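
Editor's note: retyping the wire fields from uint32_t/uint64_t to __be32/__be64 changes no layout, but it lets sparse (make C=1) flag any access that forgets a byte-order conversion. A hedged sketch of a consumer of the retyped field (the helper is illustrative, not from the patch):

	#include <asm/byteorder.h>

	/* The __be32 annotation forces an explicit conversion here. */
	static u32 fit_returned_bytes(const struct fit_completion_entry_v1 *ent)
	{
		return be32_to_cpu(ent->num_returned_bytes);
	}
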
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1498b899a593..34e17ee799be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -265,7 +265,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
+ if (blk_rq_is_scsi(req))
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
+ unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- string_get_size(capacity, queue_logical_block_size(q),
+ nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(capacity, queue_logical_block_size(q),
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
- "new size: %llu %d-byte logical blocks (%s/%s)\n",
- (unsigned long long)capacity,
- queue_logical_block_size(q),
- cap_str_10, cap_str_2);
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ nblocks,
+ queue_logical_block_size(q),
+ cap_str_10,
+ cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
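
Editor's note: the virtio capacity is reported in 512-byte sectors, while string_get_size() expects a count of logical blocks, so printing capacity directly overstated the size whenever the block size exceeded 512 bytes. A worked example of the added conversion (values are illustrative):

	/* 4 GiB disk with 4096-byte logical blocks:
	 * capacity = 8388608 sectors, block size = 4096 bytes
	 */
	u64 capacity = 8388608;
	unsigned int blksize = 4096;
	unsigned long long nblocks =
		DIV_ROUND_UP_ULL(capacity, blksize >> 9);
	/* nblocks = 8388608 / 8 = 1048576 blocks * 4096 bytes = 4 GiB */
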
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index fe7cd58c43d0..987d665e82de 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -705,9 +705,9 @@ static unsigned int xen_blkbk_unmap_prepare(
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
invcount++;
- }
+ }
- return invcount;
+ return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
@@ -1251,6 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
+ /* fall through */
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
@@ -1362,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
@@ -1381,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags);
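
Editor's note: the open-coded bio->bi_bdev assignments become bio_set_dev() calls; the helper keeps the bio-to-device association in one place so later changes to how bios track their disk do not have to touch every driver. A one-line illustration of the replacement:

	bio_set_dev(bio, preq.bdev);	/* was: bio->bi_bdev = preq.bdev; */
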
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..21c1be1eb226 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
+ bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&ring->inflight) > 0)
- return -EBUSY;
+ if (atomic_read(&ring->inflight) > 0) {
+ busy = true;
+ continue;
+ }
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
+ if (busy)
+ return -EBUSY;
+
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
@@ -810,7 +816,8 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through if not online */
+ /* fall through */
+ /* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
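
Editor's note: xen_blkif_disconnect() previously returned -EBUSY at the first ring with inflight I/O, leaving every later ring untouched. The reworked loop tears down all idle rings and reports -EBUSY only at the end. A sketch of the pattern, with hypothetical helpers standing in for the driver's teardown code:

	bool busy = false;
	unsigned int r;

	for (r = 0; r < nr_rings; r++) {
		if (ring_inflight(r)) {		/* hypothetical helper */
			busy = true;
			continue;		/* retried on a later call */
		}
		teardown_ring(r);		/* hypothetical helper */
	}
	return busy ? -EBUSY : 0;
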
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98e34e4c62b8..891265acb10e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
- req_op(shadow[i].request) == REQ_OP_DISCARD ||
- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
@@ -2456,7 +2456,7 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's Closing state -- fallthrough */
+ /* fall through */
case XenbusStateClosing:
if (info)
blkfront_closing(info);
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index b8ecba6dcd3b..7cd4a8ec3c8f 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -13,3 +13,15 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
+
+config ZRAM_WRITEBACK
+ bool "Write back incompressible page to backing device"
+ depends on ZRAM
+ default n
+ help
+ With an incompressible page, there is no memory saving in keeping
+ it in memory. Instead, write it out to the backing device.
+ For this feature, the admin should set up a backing device via
+ /sys/block/zramX/backing_dev.
+
+ See zram.txt for more information.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3b1b6340ba13..2981c27d3aae 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -175,20 +175,11 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max);
}
-static inline void zram_fill_page(char *ptr, unsigned long len,
+static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
- int i;
- unsigned long *page = (unsigned long *)ptr;
-
WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
-
- if (likely(value == 0)) {
- memset(ptr, 0, len);
- } else {
- for (i = 0; i < len / sizeof(*page); i++)
- page[i] = value;
- }
+ memset_l(ptr, value, len / sizeof(unsigned long));
}
static bool page_same_filled(void *ptr, unsigned long *element)
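
Editor's note: memset_l() stores a long-sized pattern per iteration, which is what the removed open-coded loop did; plain memset() can only replicate a single byte. A hedged illustration of the difference:

	unsigned long buf[4];

	memset(buf, 0xab, sizeof(buf));		/* bytes: ab ab ab ... */
	memset_l(buf, 0xdeadbeefUL, ARRAY_SIZE(buf));	/* four deadbeef words */
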
@@ -270,6 +261,349 @@ static ssize_t mem_used_max_store(struct device *dev,
return len;
}
+#ifdef CONFIG_ZRAM_WRITEBACK
+static bool zram_wb_enabled(struct zram *zram)
+{
+ return zram->backing_dev;
+}
+
+static void reset_bdev(struct zram *zram)
+{
+ struct block_device *bdev;
+
+ if (!zram_wb_enabled(zram))
+ return;
+
+ bdev = zram->bdev;
+ if (zram->old_block_size)
+ set_blocksize(bdev, zram->old_block_size);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ /* hope filp_close flushes all of the IO */
+ filp_close(zram->backing_dev, NULL);
+ zram->backing_dev = NULL;
+ zram->old_block_size = 0;
+ zram->bdev = NULL;
+
+ kvfree(zram->bitmap);
+ zram->bitmap = NULL;
+}
+
+static ssize_t backing_dev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ if (!zram_wb_enabled(zram)) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+ }
+
+ p = file_path(file, buf, PAGE_SIZE - 1);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto out;
+ }
+
+ ret = strlen(p);
+ memmove(buf, p, ret);
+ buf[ret++] = '\n';
+out:
+ up_read(&zram->init_lock);
+ return ret;
+}
+
+static ssize_t backing_dev_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ char *file_name;
+ struct file *backing_dev = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+ unsigned int bitmap_sz, old_block_size = 0;
+ unsigned long nr_pages, *bitmap = NULL;
+ struct block_device *bdev = NULL;
+ int err;
+ struct zram *zram = dev_to_zram(dev);
+
+ file_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!file_name)
+ return -ENOMEM;
+
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ pr_info("Can't setup backing device for initialized device\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ strlcpy(file_name, buf, len);
+
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ if (IS_ERR(backing_dev)) {
+ err = PTR_ERR(backing_dev);
+ backing_dev = NULL;
+ goto out;
+ }
+
+ mapping = backing_dev->f_mapping;
+ inode = mapping->host;
+
+ /* Only block devices are supported at the moment */
+ if (!S_ISBLK(inode->i_mode)) {
+ err = -ENOTBLK;
+ goto out;
+ }
+
+ bdev = bdgrab(I_BDEV(inode));
+ err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (err < 0)
+ goto out;
+
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
+ if (!bitmap) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ old_block_size = block_size(bdev);
+ err = set_blocksize(bdev, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ reset_bdev(zram);
+ spin_lock_init(&zram->bitmap_lock);
+
+ zram->old_block_size = old_block_size;
+ zram->bdev = bdev;
+ zram->backing_dev = backing_dev;
+ zram->bitmap = bitmap;
+ zram->nr_pages = nr_pages;
+ up_write(&zram->init_lock);
+
+ pr_info("setup backing device %s\n", file_name);
+ kfree(file_name);
+
+ return len;
+out:
+ if (bitmap)
+ kvfree(bitmap);
+
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+
+ if (backing_dev)
+ filp_close(backing_dev, NULL);
+
+ up_write(&zram->init_lock);
+
+ kfree(file_name);
+
+ return err;
+}
+
+static unsigned long get_entry_bdev(struct zram *zram)
+{
+ unsigned long entry;
+
+ spin_lock(&zram->bitmap_lock);
+ /* skip bit 0 to avoid confusion with zram.handle == 0 */
+ entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+ if (entry == zram->nr_pages) {
+ spin_unlock(&zram->bitmap_lock);
+ return 0;
+ }
+
+ set_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+
+ return entry;
+}
+
+static void put_entry_bdev(struct zram *zram, unsigned long entry)
+{
+ int was_set;
+
+ spin_lock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+ WARN_ON_ONCE(!was_set);
+}
+
+static void zram_page_end_io(struct bio *bio)
+{
+ struct page *page = bio->bi_io_vec[0].bv_page;
+
+ page_endio(page, op_is_write(bio_op(bio)),
+ blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/*
+ * Returns 1 if the submission is successful, a negative errno otherwise.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+ bio_put(bio);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_READ;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ return 1;
+}
+
+struct zram_work {
+ struct work_struct work;
+ struct zram *zram;
+ unsigned long entry;
+ struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+ struct bio_vec bvec;
+ struct zram_work *zw = container_of(work, struct zram_work, work);
+ struct zram *zram = zw->zram;
+ unsigned long entry = zw->entry;
+ struct bio *bio = zw->bio;
+
+ read_from_bdev_async(zram, &bvec, entry, bio);
+}
+
+/*
+ * The block layer wants one ->make_request_fn to be active at a
+ * time, so chaining IO to the parent IO in the same context would
+ * deadlock. To avoid that, issue the read from a worker thread.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ struct zram_work work;
+
+ work.zram = zram;
+ work.entry = entry;
+ work.bio = bio;
+
+ INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+ queue_work(system_unbound_wq, &work.work);
+ flush_work(&work.work);
+ destroy_work_on_stack(&work.work);
+
+ return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ WARN_ON(1);
+ return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ if (sync)
+ return read_from_bdev_sync(zram, bvec, entry, parent);
+ else
+ return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+{
+ struct bio *bio;
+ unsigned long entry;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ entry = get_entry_bdev(zram);
+ if (!entry) {
+ bio_put(bio);
+ return -ENOSPC;
+ }
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset)) {
+ bio_put(bio);
+ put_entry_bdev(zram, entry);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ *pentry = entry;
+
+ return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+ unsigned long entry;
+
+ zram_clear_flag(zram, index, ZRAM_WB);
+ entry = zram_get_element(zram, index);
+ zram_set_element(zram, index, 0);
+ put_entry_bdev(zram, entry);
+}
+
+#else
+static bool zram_wb_enabled(struct zram *zram) { return false; }
+static inline void reset_bdev(struct zram *zram) {}
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+{
+ return -EIO;
+}
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
+#endif
+
/*
* We switched to per-cpu streams and this attr is not needed anymore.
* However, we will keep it around for some time, because:
@@ -453,30 +787,6 @@ static bool zram_same_page_read(struct zram *zram, u32 index,
return false;
}
-static bool zram_same_page_write(struct zram *zram, u32 index,
- struct page *page)
-{
- unsigned long element;
- void *mem = kmap_atomic(page);
-
- if (page_same_filled(mem, &element)) {
- kunmap_atomic(mem);
- /* Free memory associated with this sector now. */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_SAME);
- zram_set_element(zram, index, element);
- zram_slot_unlock(zram, index);
-
- atomic64_inc(&zram->stats.same_pages);
- atomic64_inc(&zram->stats.pages_stored);
- return true;
- }
- kunmap_atomic(mem);
-
- return false;
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -515,7 +825,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
*/
static void zram_free_page(struct zram *zram, size_t index)
{
- unsigned long handle = zram_get_handle(zram, index);
+ unsigned long handle;
+
+ if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+ zram_wb_clear(zram, index);
+ atomic64_dec(&zram->stats.pages_stored);
+ return;
+ }
/*
* No memory is allocated for same element filled pages.
@@ -529,6 +845,7 @@ static void zram_free_page(struct zram *zram, size_t index)
return;
}
+ handle = zram_get_handle(zram, index);
if (!handle)
return;
@@ -542,13 +859,31 @@ static void zram_free_page(struct zram *zram, size_t index)
zram_set_obj_size(zram, index, 0);
}
-static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+ struct bio *bio, bool partial_io)
{
int ret;
unsigned long handle;
unsigned int size;
void *src, *dst;
+ if (zram_wb_enabled(zram)) {
+ zram_slot_lock(zram, index);
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ struct bio_vec bvec;
+
+ zram_slot_unlock(zram, index);
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+ return read_from_bdev(zram, &bvec,
+ zram_get_element(zram, index),
+ bio, partial_io);
+ }
+ zram_slot_unlock(zram, index);
+ }
+
if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
return 0;
@@ -581,7 +916,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page;
@@ -594,7 +929,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return -ENOMEM;
}
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
if (unlikely(ret))
goto out;
@@ -613,30 +948,57 @@ out:
return ret;
}
-static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
- struct page *page,
- unsigned long *out_handle, unsigned int *out_comp_len)
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *bio)
{
- int ret;
- unsigned int comp_len;
- void *src;
+ int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
+ unsigned int comp_len = 0;
+ void *src, *dst, *mem;
+ struct zcomp_strm *zstrm;
+ struct page *page = bvec->bv_page;
+ unsigned long element = 0;
+ enum zram_pageflags flags = 0;
+ bool allow_wb = true;
+
+ mem = kmap_atomic(page);
+ if (page_same_filled(mem, &element)) {
+ kunmap_atomic(mem);
+ /* Free memory associated with this sector now. */
+ flags = ZRAM_SAME;
+ atomic64_inc(&zram->stats.same_pages);
+ goto out;
+ }
+ kunmap_atomic(mem);
compress_again:
+ zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
- ret = zcomp_compress(*zstrm, src, &comp_len);
+ ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
+ zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
- if (handle)
- zs_free(zram->mem_pool, handle);
+ zs_free(zram->mem_pool, handle);
return ret;
}
- if (unlikely(comp_len > max_zpage_size))
+ if (unlikely(comp_len > max_zpage_size)) {
+ if (zram_wb_enabled(zram) && allow_wb) {
+ zcomp_stream_put(zram->comp);
+ ret = write_to_bdev(zram, bvec, index, bio, &element);
+ if (!ret) {
+ flags = ZRAM_WB;
+ ret = 1;
+ goto out;
+ }
+ allow_wb = false;
+ goto compress_again;
+ }
comp_len = PAGE_SIZE;
+ }
/*
* handle allocation has 2 paths:
@@ -663,7 +1025,6 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- *zstrm = zcomp_stream_get(zram->comp);
if (handle)
goto compress_again;
return -ENOMEM;
@@ -673,34 +1034,11 @@ compress_again:
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- *out_handle = handle;
- *out_comp_len = comp_len;
- return 0;
-}
-
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
-{
- int ret;
- unsigned long handle;
- unsigned int comp_len;
- void *src, *dst;
- struct zcomp_strm *zstrm;
- struct page *page = bvec->bv_page;
-
- if (zram_same_page_write(zram, index, page))
- return 0;
-
- zstrm = zcomp_stream_get(zram->comp);
- ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
- if (ret) {
- zcomp_stream_put(zram->comp);
- return ret;
- }
-
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
@@ -712,25 +1050,31 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
-
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+out:
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
zram_free_page(zram, index);
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
+
+ if (flags) {
+ zram_set_flag(zram, index, flags);
+ zram_set_element(zram, index, element);
+ } else {
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
+ }
zram_slot_unlock(zram, index);
/* Update stats */
- atomic64_add(comp_len, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
- return 0;
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page = NULL;
@@ -748,7 +1092,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (!page)
return -ENOMEM;
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, true);
if (ret)
goto out;
@@ -763,7 +1107,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
vec.bv_offset = 0;
}
- ret = __zram_bvec_write(zram, &vec, index);
+ ret = __zram_bvec_write(zram, &vec, index, bio);
out:
if (is_partial_io(bvec))
__free_page(page);
@@ -808,28 +1152,34 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
}
+/*
+ * Returns a negative errno if something goes wrong. Otherwise:
+ * Returns 0 if the IO request was completed synchronously.
+ * Returns 1 if the IO request was successfully submitted.
+ */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write)
+ int offset, bool is_write, struct bio *bio)
{
unsigned long start_time = jiffies;
int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
+ struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
} else {
atomic64_inc(&zram->stats.num_writes);
- ret = zram_bvec_write(zram, bvec, index, offset);
+ ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
@@ -868,7 +1218,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio))) < 0)
+ op_is_write(bio_op(bio)), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -922,16 +1272,18 @@ static void zram_slot_free_notify(struct block_device *bdev,
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
- int offset, err = -EIO;
+ int offset, ret;
u32 index;
struct zram *zram;
struct bio_vec bv;
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
zram = bdev->bd_disk->private_data;
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -942,7 +1294,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, is_write);
+ ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -952,9 +1304,20 @@ out:
* bio->bi_end_io does things to handle the error
* (e.g., SetPageError, set_page_dirty and extra works).
*/
- if (err == 0)
+ if (unlikely(ret < 0))
+ return ret;
+
+ switch (ret) {
+ case 0:
page_endio(page, is_write, 0);
- return err;
+ break;
+ case 1:
+ ret = 0;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
}
static void zram_reset_device(struct zram *zram)
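
Editor's note: zram_rw_page() now has to distinguish three outcomes, because a writeback read may complete asynchronously via the chained bio. A sketch of the convention the switch above implements:

	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
	if (ret < 0)			/* hard error: propagate */
		return ret;
	if (ret == 0)			/* done synchronously */
		page_endio(page, is_write, 0);
	/* ret == 1: bio submitted; its end_io completes the page */
	return 0;
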
@@ -983,6 +1346,7 @@ static void zram_reset_device(struct zram *zram)
zram_meta_free(zram, disksize);
memset(&zram->stats, 0, sizeof(zram->stats));
zcomp_destroy(comp);
+ reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
@@ -1108,6 +1472,9 @@ static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RW(backing_dev);
+#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -1118,6 +1485,9 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_backing_dev.attr,
+#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
&dev_attr_debug_stat.attr,
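
Editor's note: the writeback store is managed with a simple bitmap allocator: get_entry_bdev() hands out the first free page-sized slot on the backing device (bit 0 is reserved so a handle of 0 can still mean "empty"), and put_entry_bdev() returns it. A condensed sketch of the allocation side:

	spin_lock(&zram->bitmap_lock);
	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
	if (entry == zram->nr_pages)
		entry = 0;			/* backing device full */
	else
		set_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);
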
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index e34e44d02e3e..31762db861e3 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -60,9 +60,10 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
- /* Page consists entirely of zeros */
+ /* Page consists of the same element repeated */
ZRAM_SAME = ZRAM_FLAG_SHIFT,
ZRAM_ACCESS, /* page is now accessed */
+ ZRAM_WB, /* page is stored on backing_device */
__NR_ZRAM_PAGEFLAGS,
};
@@ -115,5 +116,13 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ struct file *backing_dev;
+ struct block_device *bdev;
+ unsigned int old_block_size;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ spinlock_t bitmap_lock;
+#endif
};
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 35952a94875e..fae5a74dc737 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA
depends on BT_HCIUART_SERDEV
depends on PM
select BT_HCIUART_H4
+ select BT_BCM
help
Nokia H4+ is serial protocol for communication between Bluetooth
device and host. This protocol is required for Bluetooth devices
@@ -167,6 +168,7 @@ config BT_HCIUART_INTEL
config BT_HCIUART_BCM
bool "Broadcom protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b793853ff05f..204afe66de92 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is to load patch and sysconfig files
- * for AR3012 */
+ * for AR3012
+ */
static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d4b0b655dde6..b07ca9565291 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -93,6 +93,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
/* Hardware states */
#define CARD_READY 1
+#define CARD_ACTIVITY 2
#define CARD_HAS_PCCARD_ID 4
#define CARD_HAS_POWER_LED 5
#define CARD_HAS_ACTIVITY_LED 6
@@ -160,16 +161,14 @@ static void bluecard_activity_led_timeout(u_long arg)
struct bluecard_info *info = (struct bluecard_info *)arg;
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
- return;
-
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Disable activity LED */
- outb(0x08 | 0x20, iobase + 0x30);
- } else {
- /* Disable power LED */
- outb(0x00, iobase + 0x30);
+ if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
+ /* leave the LED inactive for HZ/10 to get a blink effect */
+ clear_bit(CARD_ACTIVITY, &(info->hw_state));
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
+
+ /* Disable activity LED, enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
}
@@ -177,22 +176,22 @@ static void bluecard_enable_activity_led(struct bluecard_info *info)
{
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
+ /* don't disturb running blink timer */
+ if (timer_pending(&(info->timer)))
return;
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Enable activity LED */
- outb(0x10 | 0x40, iobase + 0x30);
+ set_bit(CARD_ACTIVITY, &(info->hw_state));
- /* Stop the LED after HZ/4 */
- mod_timer(&(info->timer), jiffies + HZ / 4);
+ if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
+ /* Enable activity LED, keep power LED enabled */
+ outb(0x18 | 0x60, iobase + 0x30);
} else {
- /* Enable power LED */
- outb(0x08 | 0x20, iobase + 0x30);
-
- /* Stop the LED after HZ/2 */
- mod_timer(&(info->timer), jiffies + HZ / 2);
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
}
+
+ /* Stop the LED after HZ/10 */
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
@@ -625,16 +624,13 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
-
- /* Enable LED */
- outb(0x08 | 0x20, iobase + 0x30);
- }
+ /* Enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
return 0;
}
@@ -643,15 +639,15 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
bluecard_hci_flush(hdev);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
+ /* Stop LED timer */
+ del_timer_sync(&(info->timer));
- /* Disable LED */
- outb(0x00, iobase + 0x30);
- }
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
return 0;
}
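
Editor's note: the LED rework replaces the per-event enable/disable dance with a blink scheme: the enable path backs off while the timer is pending, and the timeout handler keeps the LED in its inactive state for another HZ/10 whenever activity was seen. A hedged sketch of the timeout side, where hw_state, led_timer and iobase stand in for the driver's per-device fields:

	static void blink_timeout(unsigned long data)
	{
		if (test_bit(CARD_ACTIVITY, &hw_state)) {
			/* stay dark for HZ/10 so the blink is visible */
			clear_bit(CARD_ACTIVITY, &hw_state);
			mod_timer(&led_timer, jiffies + HZ / 10);
		}
		/* activity LED off, power LED on */
		outb(0x08 | 0x20, iobase + 0x30);
	}
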
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 32dcac017395..194788739a83 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link)
unsigned long try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ab6cfbb831d..cc4bdefa6648 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -287,6 +287,37 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
return skb;
}
+static int btbcm_read_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read Verbose Config Version Info */
+ skb = btbcm_read_verbose_config(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Controller Features */
+ skb = btbcm_read_controller_features(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Local Name */
+ skb = btbcm_read_local_name(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ kfree_skb(skb);
+
+ return 0;
+}
+
static const struct {
u16 subver;
const char *name;
@@ -322,13 +353,10 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
@@ -431,29 +459,10 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Controller Features */
- skb = btbcm_read_controller_features(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index eb794f08b238..03341ce98c32 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1455,7 +1455,8 @@ done:
fw_dump_ptr = fw_dump_data;
/* Dump all the memory data into single file, a userspace script will
- be used to split all the memory data to multiple files*/
+ * be used to split all the memory data to multiple files
+ */
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start");
for (idx = 0; idx < dump_num; idx++) {
struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
@@ -1482,7 +1483,8 @@ done:
}
/* fw_dump_data will be free in device coredump release function
- after 5 min*/
+ * after 5 min
+ */
dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 28afd5d585f9..0bbdfcef2aa8 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
* and lower 2 bytes from patch will be used.
*/
*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
out:
kfree_skb(skb);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 8279094dd713..d9a99b4302ea 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
ret = fw->size;
*buff = kmemdup(fw->data, ret, GFP_KERNEL);
+ if (!*buff)
+ ret = -ENOMEM;
release_firmware(fw);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 1cb958e199eb..c8e945d19ffe 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data)
if (!skb) {
/* Out of memory. Prepare a read retry and just
* return with the expectation that the next time
- * we're called we'll have more memory. */
+ * we're called we'll have more memory.
+ */
return -ENOMEM;
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 7df79bb12350..310e9c2e09b6 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link)
int try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, btuart_check_config, &try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fa24d693af24..7a5c06aaa181 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
+#define BTUSB_BCM_NO_PRODID 0x200000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -131,7 +132,8 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
- { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM920703 (HTC Vive) */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01),
@@ -169,6 +171,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Broadcom devices with missing product id */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID },
+
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
@@ -268,6 +274,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -357,6 +364,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -656,7 +664,8 @@ static void btusb_intr_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -745,7 +754,8 @@ static void btusb_bulk_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -840,7 +850,8 @@ static void btusb_isoc_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -952,7 +963,8 @@ static void btusb_diag_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -1076,6 +1088,10 @@ static int btusb_open(struct hci_dev *hdev)
}
data->intf->needs_remote_wakeup = 1;
+ /* A device-specific wakeup source is enabled and required for
+ * USB remote wakeup while the host is suspended
+ */
+ device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@@ -1139,6 +1155,7 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
+ device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
@@ -2892,11 +2909,25 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info == BTUSB_IGNORE)
return -ENODEV;
+ if (id->driver_info & BTUSB_BCM_NO_PRODID) {
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ /* For the broken Broadcom devices that show 0000:0000
+ * as USB vendor and product information, check that the
+ * manufacturer string identifies them as Broadcom based
+ * devices.
+ */
+ if (!udev->manufacturer ||
+ strcmp(udev->manufacturer, "Broadcom Corp"))
+ return -ENODEV;
+ }
+
if (id->driver_info & BTUSB_ATH3012) {
struct usb_device *udev = interface_to_usbdev(intf);
/* Old firmware would otherwise let ath3k driver load
- * patch and sysconfig files */
+ * patch and sysconfig files
+ */
if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
return -ENODEV;
}
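
Editor's note: a VID/PID match on 0000:0000 would catch any device with a blanked descriptor, so the new BTUSB_BCM_NO_PRODID flag pairs the broad USB_DEVICE_AND_INTERFACE_INFO() entry with a probe-time check of the manufacturer string before the Broadcom setup is used. The shape of the guard:

	struct usb_device *udev = interface_to_usbdev(intf);

	if (!udev->manufacturer ||
	    strcmp(udev->manufacturer, "Broadcom Corp"))
		return -ENODEV;	/* not one of the broken Broadcom parts */
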
@@ -3067,6 +3098,12 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
+ /* QCA Rome devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+ * Explicitly request a device reset on resume.
+ */
+ set_bit(BTUSB_RESET_RESUME, &data->flags);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3259,13 +3296,28 @@ static void play_deferred(struct btusb_data *data)
int err;
while ((urb = usb_get_from_anchor(&data->deferred))) {
+ usb_anchor_urb(urb, &data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0)
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ data->hdev->name, urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
data->tx_in_flight++;
+ usb_free_urb(urb);
+ }
+
+ /* Cleanup the rest deferred urbs. */
+ while ((urb = usb_get_from_anchor(&data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&data->deferred);
}
static int btusb_resume(struct usb_interface *intf)
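
Editor's note: play_deferred() used to leak the reference taken by usb_get_from_anchor() and scuttled the queue wholesale on failure. The rework balances every reference and frees each urb's setup packet individually. A hedged sketch of the lifetime rules:

	while ((urb = usb_get_from_anchor(&data->deferred))) {
		usb_anchor_urb(urb, &data->tx_anchor);	/* takes its own ref */
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
			kfree(urb->setup_packet);
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			break;
		}
		usb_free_urb(urb);	/* drop the get_from_anchor ref */
	}
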
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 85a3978b064f..5ef8000f90a9 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data)
complete(&lhst->wait_reg_completion);
}
-/* Called by Shared Transport layer when receive data is
- * available */
+/* Called by Shared Transport layer when receive data is available */
static long st_receive(void *priv_data, struct sk_buff *skb)
{
struct ti_st *lhst = priv_data;
@@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev)
}
/* Is ST registration callback
- * called with ERROR status? */
+ * called with ERROR status?
+ */
if (hst->reg_status != 0) {
BT_ERR("ST registration completed with invalid "
"status %d", hst->reg_status);
@@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
static int bt_ti_probe(struct platform_device *pdev)
{
- static struct ti_st *hst;
+ struct ti_st *hst;
struct hci_dev *hdev;
int err;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6a662d0161b4..e2540113d0da 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -27,6 +27,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -34,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -41,11 +44,15 @@
#include "btbcm.h"
#include "hci_uart.h"
+#define BCM_NULL_PKT 0x00
+#define BCM_NULL_SIZE 0
+
#define BCM_LM_DIAG_PKT 0x07
#define BCM_LM_DIAG_SIZE 63
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
+/* platform device driver resources */
struct bcm_device {
struct list_head list;
@@ -59,6 +66,7 @@ struct bcm_device {
bool clk_enabled;
u32 init_speed;
+ u32 oper_speed;
int irq;
u8 irq_polarity;
@@ -68,6 +76,12 @@ struct bcm_device {
#endif
};
+/* serdev driver resources */
+struct bcm_serdev {
+ struct hci_uart hu;
+};
+
+/* generic bcm uart resources */
struct bcm_data {
struct sk_buff *rx_skb;
struct sk_buff_head txq;
@@ -79,6 +93,14 @@ struct bcm_data {
static DEFINE_MUTEX(bcm_device_lock);
static LIST_HEAD(bcm_device_list);
+static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ if (hu->serdev)
+ serdev_device_set_baudrate(hu->serdev, speed);
+ else
+ hci_uart_set_baudrate(hu, speed);
+}
+
static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
struct hci_dev *hdev = hu->hdev;
@@ -176,7 +198,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
static int bcm_request_irq(struct bcm_data *bcm)
{
struct bcm_device *bdev = bcm->dev;
- int err = 0;
+ int err;
/* If this is not a platform device, do not enable PM functionalities */
mutex_lock(&bcm_device_lock);
@@ -185,21 +207,23 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
- if (bdev->irq > 0) {
- err = devm_request_irq(&bdev->pdev->dev, bdev->irq,
- bcm_host_wake, IRQF_TRIGGER_RISING,
- "host_wake", bdev);
- if (err)
- goto unlock;
+ if (bdev->irq <= 0) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake,
+ IRQF_TRIGGER_RISING, "host_wake", bdev);
+ if (err)
+ goto unlock;
- device_init_wakeup(&bdev->pdev->dev, true);
+ device_init_wakeup(&bdev->pdev->dev, true);
- pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
- BCM_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&bdev->pdev->dev);
- pm_runtime_set_active(&bdev->pdev->dev);
- pm_runtime_enable(&bdev->pdev->dev);
- }
+ pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+ BCM_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&bdev->pdev->dev);
+ pm_runtime_set_active(&bdev->pdev->dev);
+ pm_runtime_enable(&bdev->pdev->dev);
unlock:
mutex_unlock(&bcm_device_lock);
@@ -287,6 +311,14 @@ static int bcm_open(struct hci_uart *hu)
hu->priv = bcm;
+ /* If this is a serdev defined device, then only use
+ * serdev open primitive and skip the rest.
+ */
+ if (hu->serdev) {
+ serdev_device_open(hu->serdev);
+ goto out;
+ }
+
if (!hu->tty->dev)
goto out;
@@ -301,6 +333,7 @@ static int bcm_open(struct hci_uart *hu)
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
bcm->dev = dev;
hu->init_speed = dev->init_speed;
+ hu->oper_speed = dev->oper_speed;
#ifdef CONFIG_PM
dev->hu = hu;
#endif
@@ -321,6 +354,12 @@ static int bcm_close(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ /* If this is a serdev defined device, only use serdev
+ * close primitive and then continue as usual.
+ */
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+
/* Protect bcm->dev against removal of the device or driver */
mutex_lock(&bcm_device_lock);
if (bcm_device_exists(bdev)) {
@@ -396,7 +435,7 @@ static int bcm_setup(struct hci_uart *hu)
speed = 0;
if (speed)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
/* Operational speed if any */
if (hu->oper_speed)
@@ -409,7 +448,7 @@ static int bcm_setup(struct hci_uart *hu)
if (speed) {
err = bcm_set_baudrate(hu, speed);
if (!err)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
}
finalize:
@@ -432,11 +471,19 @@ finalize:
.lsize = 0, \
.maxlen = BCM_LM_DIAG_SIZE
+#define BCM_RECV_NULL \
+ .type = BCM_NULL_PKT, \
+ .hlen = BCM_NULL_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = BCM_NULL_SIZE
+
static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
+ { BCM_RECV_NULL, .recv = hci_recv_diag },
};
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -697,8 +744,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
sb = &ares->data.uart_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) {
dev->init_speed = sb->default_baud_rate;
+ dev->oper_speed = 4000000;
+ }
break;
default:
@@ -851,7 +900,6 @@ static const struct hci_uart_proto bcm_proto = {
.name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
- .oper_speed = 4000000,
.open = bcm_open,
.close = bcm_close,
.flush = bcm_flush,
@@ -901,9 +949,57 @@ static struct platform_driver bcm_driver = {
},
};
+static int bcm_serdev_probe(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev;
+ u32 speed;
+ int err;
+
+ bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
+ if (!bcmdev)
+ return -ENOMEM;
+
+ bcmdev->hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, bcmdev);
+
+ err = device_property_read_u32(&serdev->dev, "max-speed", &speed);
+ if (!err)
+ bcmdev->hu.oper_speed = speed;
+
+ return hci_uart_register_device(&bcmdev->hu, &bcm_proto);
+}
+
+static void bcm_serdev_remove(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&bcmdev->hu);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id bcm_bluetooth_of_match[] = {
+ { .compatible = "brcm,bcm43438-bt" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
+#endif
+
+static struct serdev_device_driver bcm_serdev_driver = {
+ .probe = bcm_serdev_probe,
+ .remove = bcm_serdev_remove,
+ .driver = {
+ .name = "hci_uart_bcm",
+ .of_match_table = of_match_ptr(bcm_bluetooth_of_match),
+ },
+};
+
int __init bcm_init(void)
{
+ /* For now, we need to keep both platform device
+ * driver (ACPI generated) and serdev driver (DT).
+ */
platform_driver_register(&bcm_driver);
+ serdev_device_driver_register(&bcm_serdev_driver);
return hci_uart_register_proto(&bcm_proto);
}
@@ -911,6 +1007,7 @@ int __init bcm_init(void)
int __exit bcm_deinit(void)
{
platform_driver_unregister(&bcm_driver);
+ serdev_device_driver_unregister(&bcm_serdev_driver);
return hci_uart_unregister_proto(&bcm_proto);
}
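Worth noting: bcm_init() above ignores the return values of both registration calls, so a failed registration would still be unregistered again in bcm_deinit(). A stricter variant would unwind on failure; the sketch below is an illustration of that idea, not part of this patch:

/* Hedged sketch: dual registration with error unwinding. Uses only
 * symbols already defined in hci_bcm.c above; not what the patch does.
 */
int __init bcm_init_strict(void)
{
	int err;

	err = platform_driver_register(&bcm_driver);
	if (err)
		return err;

	err = serdev_device_driver_register(&bcm_serdev_driver);
	if (err)
		goto err_platform;

	err = hci_uart_register_proto(&bcm_proto);
	if (err)
		goto err_serdev;

	return 0;

err_serdev:
	serdev_device_driver_unregister(&bcm_serdev_driver);
err_platform:
	platform_driver_unregister(&bcm_driver);
	return err;
}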
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 4e328d7d47bb..3b82a87224a9 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -172,7 +172,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const struct h4_recv_pkt *pkts, int pkts_count)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
- u8 alignment = hu->alignment;
+ u8 alignment = hu->alignment ? hu->alignment : 1;
while (count) {
int i, len;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8397b716fa65..a746627e784e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
/* Error if the tty has no write op instead of leaving an exploitable
- hole */
+ * hole
+ */
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index c982943f0747..424c15aa7bb7 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev)
cmd = (struct hci_command *)action_ptr;
if (cmd->opcode == 0xff36) {
/* ignore remote change
- * baud rate HCI VS command */
+ * baud rate HCI VS command
+ */
bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware");
break;
}
@@ -742,14 +743,8 @@ static int hci_ti_probe(struct serdev_device *serdev)
static void hci_ti_remove(struct serdev_device *serdev)
{
struct ll_device *lldev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &lldev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
+ hci_uart_unregister_device(&lldev->hu);
}
static const struct of_device_id hci_ti_of_match[] = {
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 181a15b549e5..3539fd03f47e 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev)
{
struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &btdev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
-
- pm_runtime_disable(&btdev->serdev->dev);
+ hci_uart_unregister_device(&btdev->hu);
}
static int nokia_bluetooth_runtime_suspend(struct device *dev)
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index aea930101dd2..b725ac4f7ff6 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -354,3 +354,16 @@ err_alloc:
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
+
+void hci_uart_unregister_device(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+
+ cancel_work_sync(&hu->write_work);
+
+ hu->proto->close(hu);
+}
+EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index c6e9e1cf63f8..d9cd95d81149 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -112,6 +112,7 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 2408ea38a39c..ae3d8f3444b9 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,7 +132,7 @@ config SIMPLE_PM_BUS
config SUNXI_RSB
tristate "Allwinner sunXi Reduced Serial Bus Driver"
- default MACH_SUN8I || MACH_SUN9I
+ default MACH_SUN8I || MACH_SUN9I || ARM64
depends on ARCH_SUNXI
select REGMAP
help
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index c49da15d9790..3c29d36702a8 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -2124,8 +2124,8 @@ int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
return -ENODEV;
port = __cci_ace_get_port(dn, ACE_LITE_PORT);
- if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
- dn->full_name))
+ if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
+ dn))
return -ENODEV;
cci_port_control(port, enable);
return 0;
@@ -2200,14 +2200,14 @@ static int cci_probe_ports(struct device_node *np)
if (of_property_read_string(cp, "interface-type",
&match_str)) {
- WARN(1, "node %s missing interface-type property\n",
- cp->full_name);
+ WARN(1, "node %pOF missing interface-type property\n",
+ cp);
continue;
}
is_ace = strcmp(match_str, "ace") == 0;
if (!is_ace && strcmp(match_str, "ace-lite")) {
- WARN(1, "node %s containing invalid interface-type property, skipping it\n",
- cp->full_name);
+ WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
+ cp);
continue;
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 4bd361d64270..3d56ebcda720 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -156,8 +156,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
ret = weim_timing_setup(child, base, devtype);
if (ret)
- dev_warn(&pdev->dev, "%s set timing failed.\n",
- child->full_name);
+ dev_warn(&pdev->dev, "%pOF set timing failed.\n",
+ child);
else
have_child = 1;
}
@@ -166,8 +166,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
ret = of_platform_default_populate(pdev->dev.of_node,
NULL, &pdev->dev);
if (ret)
- dev_err(&pdev->dev, "%s fail to create devices.\n",
- pdev->dev.of_node->full_name);
+ dev_err(&pdev->dev, "%pOF fail to create devices.\n",
+ pdev->dev.of_node);
return ret;
}
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index bf500e0e7362..77791f3dcfc6 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -70,8 +70,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(regs))
- goto err0;
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err1;
+ }
pm_runtime_get_sync(&pdev->dev);
reg = readl_relaxed(regs + OCP2SCP_TIMING);
@@ -83,6 +85,9 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
return 0;
+err1:
+ pm_runtime_disable(&pdev->dev);
+
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 795c9d9c96a6..328ca93781cf 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -556,20 +556,20 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
/* Runtime addresses for all slaves should be set first */
for_each_available_child_of_node(np, child) {
- dev_dbg(dev, "setting child %s runtime address\n",
- child->full_name);
+ dev_dbg(dev, "setting child %pOF runtime address\n",
+ child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret) {
- dev_err(dev, "%s: invalid 'reg' property: %d\n",
- child->full_name, ret);
+ dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
+ child, ret);
continue;
}
rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
if (!rtaddr) {
- dev_err(dev, "%s: unknown hardware device address\n",
- child->full_name);
+ dev_err(dev, "%pOF: unknown hardware device address\n",
+ child);
continue;
}
@@ -586,15 +586,15 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
/* send command */
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
- dev_warn(dev, "%s: set runtime address failed: %d\n",
- child->full_name, ret);
+ dev_warn(dev, "%pOF: set runtime address failed: %d\n",
+ child, ret);
}
/* Then we start adding devices and probing them */
for_each_available_child_of_node(np, child) {
struct sunxi_rsb_device *rdev;
- dev_dbg(dev, "adding child %s\n", child->full_name);
+ dev_dbg(dev, "adding child %pOF\n", child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret)
@@ -606,8 +606,8 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
if (IS_ERR(rdev))
- dev_err(dev, "failed to add child device %s: %ld\n",
- child->full_name, PTR_ERR(rdev));
+ dev_err(dev, "failed to add child device %pOF: %ld\n",
+ child, PTR_ERR(rdev));
}
return 0;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ccd239ab879f..623714344600 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -161,7 +161,7 @@ config VIRTIO_CONSOLE
depends on VIRTIO && TTY
select HVC_DRIVER
help
- Virtio console for use with lguest and other hypervisors.
+ Virtio console for use with hypervisors.
Also serves as a general-purpose serial device for data
transfer between the guest and host. Character devices at
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index dcbbb4ea3cc1..89527bae4602 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -381,7 +381,7 @@ static void agp_ali_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ali_pci_table[] = {
+static const struct pci_device_id agp_ali_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 5fbd333e4c6d..b450544dcaf0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -21,7 +21,7 @@
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
-static struct pci_device_id agp_amdk7_pci_table[];
+static const struct pci_device_id agp_amdk7_pci_table[];
struct amd_page_map {
unsigned long *real;
@@ -508,7 +508,7 @@ static int agp_amdk7_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
/* must be the same order as name table above */
-static struct pci_device_id agp_amdk7_pci_table[] = {
+static const struct pci_device_id agp_amdk7_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index c99cd19d9147..e50c29c97ca7 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -610,7 +610,7 @@ static int agp_amd64_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_amd64_pci_table[] = {
+static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0b5ec7af2414..88b4cbee4dac 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -540,7 +540,7 @@ static void agp_ati_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ati_pci_table[] = {
+static const struct pci_device_id agp_ati_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 533cb6d229b8..7f88490b5479 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -427,7 +427,7 @@ static int agp_efficeon_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_efficeon_pci_table[] = {
+static const struct pci_device_id agp_efficeon_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a21daed5b62..9e4f27a6cb5a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,7 +828,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_intel_pci_table[] = {
+static const struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 6c8d39cb566e..828b34445203 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -420,7 +420,7 @@ static int agp_nvidia_resume(struct pci_dev *pdev)
#endif
-static struct pci_device_id agp_nvidia_pci_table[] = {
+static const struct pci_device_id agp_nvidia_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 2c74038da459..14909fc5d767 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -237,7 +237,7 @@ static int agp_sis_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_sis_pci_table[] = {
+static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index fdced547ad59..c381c8e396fc 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -679,7 +679,7 @@ static void agp_uninorth_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_uninorth_pci_table[] = {
+static const struct pci_device_id agp_uninorth_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index b67263d6e34b..c0a5b1f3a986 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -67,7 +67,7 @@ static char *applicom_pci_devnames[] = {
"PCI2000PFB"
};
-static struct pci_device_id applicom_pci_tbl[] = {
+static const struct pci_device_id applicom_pci_tbl[] = {
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1b223c32a8ae..95a031e9eced 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -13,10 +13,8 @@ menuconfig HW_RANDOM
that's usually called /dev/hwrng, and which exposes one
of possibly several hardware random number generators.
- These hardware random number generators do not feed directly
- into the kernel's random number generator. That is usually
- handled by the "rngd" daemon. Documentation/hw_random.txt
- has more information.
+ These hardware random number generators do feed into the
+ kernel's random number generator entropy pool.
If unsure, say Y.
@@ -255,6 +253,20 @@ config HW_RANDOM_MXC_RNGA
If unsure, say Y.
+config HW_RANDOM_IMX_RNGC
+ tristate "Freescale i.MX RNGC Random Number Generator"
+ depends on ARCH_MXC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator Version C hardware found on some Freescale i.MX
+ processors. Version B is also supported by this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-rngc.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b085975ec1d2..39a67defac67 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 503a41dfa193..9701ac7d8b47 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -28,7 +28,10 @@
#define RNG_MODULE_NAME "hw_random"
static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
+/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -303,6 +306,7 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
err = 0;
+ cur_rng_set_by_user = 1;
if (rng != current_rng)
err = set_current_rng(rng);
break;
@@ -351,16 +355,27 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
+static ssize_t hwrng_attr_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+}
+
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
hwrng_attr_current_show,
hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
hwrng_attr_available_show,
NULL);
+static DEVICE_ATTR(rng_selected, S_IRUGO,
+ hwrng_attr_selected_show,
+ NULL);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
+ &dev_attr_rng_selected.attr,
NULL
};
@@ -417,6 +432,7 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
+ struct list_head *rng_list_ptr;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -432,14 +448,27 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ /* rng_list is sorted by decreasing quality */
+ list_for_each(rng_list_ptr, &rng_list) {
+ tmp = list_entry(rng_list_ptr, struct hwrng, list);
+ if (tmp->quality < rng->quality)
+ break;
+ }
+ list_add_tail(&rng->list, rng_list_ptr);
+
old_rng = current_rng;
err = 0;
- if (!old_rng) {
+ if (!old_rng ||
+ (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ /*
+ * Set the new rng as current when it provides better
+ * entropy quality and the current rng was not chosen
+ * by userspace.
+ */
err = set_current_rng(rng);
if (err)
goto out_unlock;
}
- list_add_tail(&rng->list, &rng_list);
if (old_rng && !rng->init) {
/*
@@ -466,12 +495,13 @@ void hwrng_unregister(struct hwrng *rng)
list_del(&rng->list);
if (current_rng == rng) {
drop_current_rng();
+ cur_rng_set_by_user = 0;
+ /* rng_list is sorted by quality, use the best (=first) one */
if (!list_empty(&rng_list)) {
- struct hwrng *tail;
-
- tail = list_entry(rng_list.prev, struct hwrng, list);
+ struct hwrng *new_rng;
- set_current_rng(tail);
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ set_current_rng(new_rng);
}
}
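The hwrng_register() change above keeps rng_list ordered by descending quality, so hwrng_unregister() can fall back to the list head as the best remaining source. A standalone sketch of the same sorted-insert idiom, using a toy singly linked list instead of the kernel's list_head (all names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int quality;
	struct node *next;
};

/* Insert before the first entry of lower quality, so the list stays
 * sorted by decreasing quality and the head is always the best source.
 */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node **pos = head;

	while (*pos && (*pos)->quality >= n->quality)
		pos = &(*pos)->next;
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	int quality[] = { 700, 1000, 900 };
	struct node *n, *head = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(quality) / sizeof(quality[0]); i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->quality = quality[i];
		insert_sorted(&head, n);
	}

	for (n = head; n; n = n->next)
		printf("%d\n", n->quality);	/* prints 1000, 900, 700 */
	return 0;
}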
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 000000000000..88db42d30760
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,331 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+
+#define RNGC_CMD_CLR_ERR 0x00000020
+#define RNGC_CMD_CLR_INT 0x00000010
+#define RNGC_CMD_SEED 0x00000002
+#define RNGC_CMD_SELF_TEST 0x00000001
+
+#define RNGC_CTRL_MASK_ERROR 0x00000040
+#define RNGC_CTRL_MASK_DONE 0x00000020
+
+#define RNGC_STATUS_ERROR 0x00010000
+#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
+#define RNGC_STATUS_SEED_DONE 0x00000020
+#define RNGC_STATUS_ST_DONE 0x00000010
+
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+
+#define RNGC_TIMEOUT 3000 /* 3 sec */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ struct completion rng_op_done;
+ /*
+ * err_reg is written only by the irq handler and read only
+ * when interrupts are masked, so no spinlock is needed
+ */
+ u32 err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+ u32 ctrl, cmd;
+
+ /* mask interrupts */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * CLR_INT clears the interrupt only if there's no error;
+ * CLR_ERR clears the interrupt and the error register if
+ * there is an error
+ */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+ writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+ u32 ctrl;
+
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+ u32 cmd;
+ int ret;
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* run self test */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ if (rngc->err_reg != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ unsigned int status;
+ unsigned int level;
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ status = readl(rngc->base + RNGC_STATUS);
+
+ /* is there some error while reading this random number? */
+ if (status & RNGC_STATUS_ERROR)
+ break;
+
+ /* how many random numbers are in FIFO? [0-16] */
+ level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
+ RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+ if (level) {
+ /* retrieve a random number from FIFO */
+ *(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+ }
+
+ return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+ struct imx_rngc *rngc = (struct imx_rngc *)priv;
+ u32 status;
+
+ /*
+ * clearing the interrupt will also clear the error register;
+ * read the error and status registers before clearing
+ */
+ status = readl(rngc->base + RNGC_STATUS);
+ rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+ complete(&rngc->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ u32 cmd;
+ int ret;
+
+ /* clear error */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+ /* create seed, repeat while there is some statistical error */
+ do {
+ imx_rngc_irq_unmask(rngc);
+
+ /* seed creation */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ RNGC_TIMEOUT);
+
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+ return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_probe(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+ if (!rngc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rngc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rngc->base))
+ return PTR_ERR(rngc->base);
+
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rngc->clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(rngc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
+ return irq;
+ }
+
+ ret = clk_prepare_enable(rngc->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ goto err;
+ }
+
+ init_completion(&rngc->rng_op_done);
+
+ rngc->rng.name = pdev->name;
+ rngc->rng.init = imx_rngc_init;
+ rngc->rng.read = imx_rngc_read;
+
+ rngc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rngc);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (self_test) {
+ ret = imx_rngc_self_test(rngc);
+ if (ret) {
+ dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ goto err;
+ }
+ }
+
+ ret = hwrng_register(&rngc->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ return 0;
+
+err:
+ clk_disable_unprepare(rngc->clk);
+
+ return ret;
+}
+
+static int __exit imx_rngc_remove(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rngc->rng);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_rngc_suspend(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+static int imx_rngc_resume(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rngc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ .suspend = imx_rngc_suspend,
+ .resume = imx_rngc_resume,
+};
+#endif
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+ { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+ .driver = {
+ .name = "imx_rngc",
+#ifdef CONFIG_PM
+ .pm = &imx_rngc_pm_ops,
+#endif
+ .of_match_table = imx_rngc_dt_ids,
+ },
+ .remove = __exit_p(imx_rngc_remove),
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
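In imx_rngc_read() above, the FIFO fill level lives in bits 11:8 of the status register and is extracted with RNGC_STATUS_FIFO_LEVEL_MASK plus an 8-bit shift. A standalone worked example of that mask-and-shift; the sample status value 0x00000500 is invented, not a dump from real hardware:

/* Worked example of the RNGC_STATUS FIFO-level extraction above. */
#include <stdio.h>
#include <stdint.h>

#define RNGC_STATUS_FIFO_LEVEL_MASK	0x00000f00
#define RNGC_STATUS_FIFO_LEVEL_SHIFT	8

int main(void)
{
	uint32_t status = 0x00000500;	/* invented sample: bits 11:8 = 5 */
	uint32_t level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
			 RNGC_STATUS_FIFO_LEVEL_SHIFT;

	printf("FIFO holds %u random words\n", level);	/* prints 5 */
	return 0;
}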
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 985973855005..36f47e8d06a3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2812,7 +2812,7 @@ static struct platform_driver ipmi_driver = {
};
#ifdef CONFIG_PARISC
-static int ipmi_parisc_probe(struct parisc_device *dev)
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
struct smi_info *info;
int rv;
@@ -2850,22 +2850,24 @@ static int ipmi_parisc_probe(struct parisc_device *dev)
return 0;
}
-static int ipmi_parisc_remove(struct parisc_device *dev)
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
{
cleanup_one_si(dev_get_drvdata(&dev->dev));
return 0;
}
-static const struct parisc_device_id ipmi_parisc_tbl[] = {
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
{ 0, }
};
-static struct parisc_driver ipmi_parisc_driver = {
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
.name = "ipmi",
.id_table = ipmi_parisc_tbl,
.probe = ipmi_parisc_probe,
- .remove = ipmi_parisc_remove,
+ .remove = __exit_p(ipmi_parisc_remove),
};
#endif /* CONFIG_PARISC */
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 8c5411a8f33f..691f5898bb32 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -128,10 +128,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
int bRC = -EIO;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
@@ -148,7 +149,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
- if ((usDI & 0x00FF) < numDspBases) {
+ if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
} else {
pSettings->usDspBaseIO = 0;
@@ -176,7 +177,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
- if (((usSI & 0xFF00) >> 8) < numUartBases) {
+ if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
} else {
pSettings->usUartBaseIO = 0;
@@ -205,15 +206,16 @@ int smapi_set_DSP_cfg(void)
int bRC = -EIO;
int i;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
- unsigned short ausUartIrqs[] = { 3, 4 };
-
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
- unsigned short numDspIrqs = 5;
- unsigned short numUartIrqs = 2;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ static const unsigned short ausDspIrqs[] = {
+ 5, 7, 10, 11, 15 };
+ static const unsigned short ausUartIrqs[] = {
+ 3, 4 };
+
unsigned short dspio_index = 0, uartio_index = 0;
PRINTK_5(TRACE_SMAPI,
@@ -221,11 +223,11 @@ int smapi_set_DSP_cfg(void)
mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
if (mwave_3780i_io) {
- for (i = 0; i < numDspBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
- if (i == numDspBases) {
+ if (i == ARRAY_SIZE(ausDspBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
return bRC;
}
@@ -233,22 +235,22 @@ int smapi_set_DSP_cfg(void)
}
if (mwave_3780i_irq) {
- for (i = 0; i < numDspIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
if (mwave_3780i_irq == ausDspIrqs[i])
break;
}
- if (i == numDspIrqs) {
+ if (i == ARRAY_SIZE(ausDspIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
return bRC;
}
}
if (mwave_uart_io) {
- for (i = 0; i < numUartBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
if (mwave_uart_io == ausUartBases[i])
break;
}
- if (i == numUartBases) {
+ if (i == ARRAY_SIZE(ausUartBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
return bRC;
}
@@ -257,11 +259,11 @@ int smapi_set_DSP_cfg(void)
if (mwave_uart_irq) {
- for (i = 0; i < numUartIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
if (mwave_uart_irq == ausUartIrqs[i])
break;
}
- if (i == numUartIrqs) {
+ if (i == ARRAY_SIZE(ausUartIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
return bRC;
}
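The smapi conversion above replaces hand-maintained element counts (numDspBases and friends) with ARRAY_SIZE, so the loop bounds can never drift out of sync with their tables. The idiom in isolation, as a small self-contained sketch (toy table reusing the UART base values shown above):

/* Toy illustration of the ARRAY_SIZE conversion above: the loop
 * bound tracks the table automatically when entries are added.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short uart_bases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };

int main(void)
{
	unsigned short io = 0x03E8;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(uart_bases); i++)
		if (uart_bases[i] == io)
			break;

	if (i == ARRAY_SIZE(uart_bases))
		printf("invalid UART base 0x%x\n", (unsigned int)io);
	else
		printf("UART base index %zu\n", i);	/* prints 2 */
	return 0;
}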
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 3e73bcdf9e65..d256110ba672 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -101,9 +101,6 @@ static DEFINE_IDA(ida_index);
#define PP_BUFFER_SIZE 1024
#define PARDEVICE_MAX 8
-/* ROUND_UP macro from fs/select.c */
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-
static DEFINE_MUTEX(pp_do_mutex);
/* define fixed sized ioctl cmd for y2038 migration */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f4f866ee54bc..d3a979e25724 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = {
static struct platform_device *sonypi_platform_device;
-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
+static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
{
.ident = "Sony Vaio",
.matches = {
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 572a51704e67..6210bff46341 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -766,7 +766,7 @@ static struct attribute *tlclk_sysfs_entries[] = {
NULL
};
-static struct attribute_group tlclk_attribute_group = {
+static const struct attribute_group tlclk_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = tlclk_sysfs_entries,
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 67ec9d3d04f5..0eca20c5a80c 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -164,14 +164,7 @@ static int tpm_class_shutdown(struct device *dev)
chip->ops = NULL;
up_write(&chip->ops_sem);
}
- /* Allow bus- and device-specific code to run. Note: since chip->ops
- * is NULL, more-specific shutdown code will not be able to issue TPM
- * commands.
- */
- if (dev->bus && dev->bus->shutdown)
- dev->bus->shutdown(dev);
- else if (dev->driver && dev->driver->shutdown)
- dev->driver->shutdown(dev);
+
return 0;
}
@@ -214,7 +207,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->devs);
chip->dev.class = tpm_class;
- chip->dev.class->shutdown = tpm_class_shutdown;
+ chip->dev.class->shutdown_pre = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ad843eb02ae7..d1aed2513bd9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -451,9 +451,6 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
* device is created by remoteproc, the DMA memory is
* associated with the grandparent device:
* vdev => rproc => platform-dev.
- * The code here would have been less quirky if
- * DMA_MEMORY_INCLUDES_CHILDREN had been supported
- * in dma-coherent.c
*/
if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
goto free_buf;
@@ -1130,7 +1127,7 @@ static const struct file_operations port_fops = {
* We turn the characters into a scatter-gather list, add it to the
* output queue and then kick the Host. Then we sit here waiting for
* it to finish: inefficient in theory, but in practice
- * implementations will do it immediately (lguest's Launcher does).
+ * implementations will do it immediately.
*/
static int put_chars(u32 vtermno, const char *buf, int count)
{
@@ -1308,7 +1305,7 @@ static struct attribute *port_sysfs_entries[] = {
NULL
};
-static struct attribute_group port_attribute_group = {
+static const struct attribute_group port_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = port_sysfs_entries,
};
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 3e6b23c3453c..067396bedf22 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -86,8 +86,7 @@
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/uaccess.h>
#ifdef CONFIG_OF
@@ -222,6 +221,8 @@ static const struct config_registers v6_config_registers = {
* hwicap_command_desync - Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
+ * Returns: '0' on success and a failure value on error
+ *
* This command desynchronizes the ICAP. After this command, a
* bitstream containing a NULL packet followed by a SYNCH packet is
* required before the ICAP will recognize commands.
@@ -251,10 +252,12 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
- * register value to be returned.
- * Examples: XHI_IDCODE, XHI_FLR.
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
* @reg_data: returns the value of the register.
*
+ * Returns: '0' on success and a failure value on error
+ *
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
@@ -320,7 +323,8 @@ static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
dev_dbg(drvdata->dev, "initializing\n");
/* Abort any current transaction, to make sure we have the
- * ICAP in a good state. */
+ * ICAP in a good state.
+ */
dev_dbg(drvdata->dev, "Reset...\n");
drvdata->config->reset(drvdata);
@@ -632,7 +636,6 @@ static int hwicap_setup(struct device *dev, int id,
drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
retval = -ENOMEM;
goto failed0;
}
@@ -759,20 +762,20 @@ static int hwicap_of_probe(struct platform_device *op,
id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
regs);
@@ -802,20 +805,20 @@ static int hwicap_drv_probe(struct platform_device *pdev)
return -ENODEV;
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = pdev->dev.platform_data;
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&pdev->dev, pdev->id, res,
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 38b145eaf24d..6b963d1c8ba3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -62,11 +62,13 @@ struct hwicap_drvdata {
struct hwicap_driver_config {
/* Read configuration data given by size into the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Write configuration data given by size from the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Get the status register, bit pattern given by:
@@ -193,11 +195,12 @@ struct config_registers {
* hwicap_type_1_read - Generates a Type 1 read packet header.
* @reg: is the address of the register to be read back.
*
+ * Return:
* Generates a Type 1 read packet header, which is used to indirectly
* read registers in the configuration logic. This packet must then
* be sent through the icap device, and a return packet received with
* the information.
- **/
+ */
static inline u32 hwicap_type_1_read(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
@@ -208,7 +211,9 @@ static inline u32 hwicap_type_1_read(u32 reg)
/**
* hwicap_type_1_write - Generates a Type 1 write packet header
* @reg: is the address of the register to be read back.
- **/
+ *
+ * Return: Type 1 write packet header
+ */
static inline u32 hwicap_type_1_write(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 68ca2d9fcd73..1c4e1aa6767e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -31,6 +31,13 @@ config COMMON_CLK_WM831X
source "drivers/clk/versatile/Kconfig"
+config CLK_HSDK
+ bool "PLL Driver for HSDK platform"
+ depends on OF || COMPILE_TEST
+ ---help---
+ This driver supports control of the HSDK core, system, ddr, tunnel
+ and hdmi PLLs.
+
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77620/77686/77802 MFD"
depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
@@ -39,10 +46,10 @@ config COMMON_CLK_MAX77686
clock.
config COMMON_CLK_RK808
- tristate "Clock driver for RK808/RK818"
+ tristate "Clock driver for RK805/RK808/RK818"
depends on MFD_RK808
---help---
- This driver supports RK808 and RK818 crystal oscillator clock. These
+ This driver supports RK805, RK808 and RK818 crystal oscillator clocks. These
multi-function devices have two fixed-rate oscillators,
clocked at 32KHz each. Clkout1 is always on, Clkout2 can be
switched off via a control register.
@@ -210,14 +217,14 @@ config COMMON_CLK_OXNAS
Support for the OXNAS SoC Family clocks.
config COMMON_CLK_VC5
- tristate "Clock driver for IDT VersaClock5 devices"
+ tristate "Clock driver for IDT VersaClock 5,6 devices"
depends on I2C
depends on OF
select REGMAP_I2C
---help---
- This driver supports the IDT VersaClock5 programmable clock
- generator.
+ This driver supports the IDT VersaClock 5 and VersaClock 6
+ programmable clock generators.
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index cd376b3fb47a..c99f363826f0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,8 +27,8 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
+obj-$(CONFIG_ARCH_STM32) += clk-stm32h7.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 13e67bd35cff..c68947b65a4c 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -6,6 +6,7 @@ obj-y += pmc.o sckc.o
obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
obj-y += clk-system.o clk-peripheral.o clk-programmable.o
+obj-$(CONFIG_HAVE_AT91_AUDIO_PLL) += clk-audio-pll.o
obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
new file mode 100644
index 000000000000..da7bafcfbe70
--- /dev/null
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2016 Atmel Corporation,
+ * Songjun Wu <songjun.wu@atmel.com>,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ * Copyright (C) 2017 Free Electrons,
+ * Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The Sama5d2 SoC has two audio PLLs (PMC and PAD) that share the same parent
+ * (FRAC). FRAC can output between 620 and 700MHz and only multiplies the rate
+ * of its own parent. PMC and PAD can then divide the FRAC rate to best match
+ * the requested rate.
+ *
+ * Traits of FRAC clock:
+ * enable - clk_enable writes nd, fracr parameters and enables PLL
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate * ((nd + 1) + (fracr / 2^22))
+ * parent - fixed parent. No clk_set_parent support
+ *
+ * Traits of PMC clock:
+ * enable - clk_enable writes qdpmc, and enables PMC output
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate / (qdpmc + 1)
+ * parent - fixed parent. No clk_set_parent support
+ *
+ * Traits of PAD clock:
+ * enable - clk_enable writes divisors and enables PAD output
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate / (qdaudio * div)
+ * parent - fixed parent. No clk_set_parent support
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AUDIO_PLL_DIV_FRAC BIT(22)
+#define AUDIO_PLL_ND_MAX (AT91_PMC_AUDIO_PLL_ND_MASK >> \
+ AT91_PMC_AUDIO_PLL_ND_OFFSET)
+
+#define AUDIO_PLL_QDPAD(qd, div) ((AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(qd) & \
+ AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK) | \
+ (AT91_PMC_AUDIO_PLL_QDPAD_DIV(div) & \
+ AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK))
+
+#define AUDIO_PLL_QDPMC_MAX (AT91_PMC_AUDIO_PLL_QDPMC_MASK >> \
+ AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AUDIO_PLL_FOUT_MIN 620000000UL
+#define AUDIO_PLL_FOUT_MAX 700000000UL
+
+struct clk_audio_frac {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 fracr;
+ u8 nd;
+};
+
+struct clk_audio_pad {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 qdaudio;
+ u8 div;
+};
+
+struct clk_audio_pmc {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 qdpmc;
+};
+
+#define to_clk_audio_frac(hw) container_of(hw, struct clk_audio_frac, hw)
+#define to_clk_audio_pad(hw) container_of(hw, struct clk_audio_pad, hw)
+#define to_clk_audio_pmc(hw) container_of(hw, struct clk_audio_pmc, hw)
+
+static int clk_audio_pll_frac_enable(struct clk_hw *hw)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN, 0);
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN,
+ AT91_PMC_AUDIO_PLL_RESETN);
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL1,
+ AT91_PMC_AUDIO_PLL_FRACR_MASK, frac->fracr);
+
+ /*
+ * reset and enable have to be done in 2 separate writes
+ * for AT91_PMC_AUDIO_PLL0
+ */
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PLLEN |
+ AT91_PMC_AUDIO_PLL_ND_MASK,
+ AT91_PMC_AUDIO_PLL_PLLEN |
+ AT91_PMC_AUDIO_PLL_ND(frac->nd));
+
+ return 0;
+}
+
+static int clk_audio_pll_pad_enable(struct clk_hw *hw)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL1,
+ AT91_PMC_AUDIO_PLL_QDPAD_MASK,
+ AUDIO_PLL_QDPAD(apad_ck->qdaudio, apad_ck->div));
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PADEN, AT91_PMC_AUDIO_PLL_PADEN);
+
+ return 0;
+}
+
+static int clk_audio_pll_pmc_enable(struct clk_hw *hw)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PMCEN |
+ AT91_PMC_AUDIO_PLL_QDPMC_MASK,
+ AT91_PMC_AUDIO_PLL_PMCEN |
+ AT91_PMC_AUDIO_PLL_QDPMC(apmc_ck->qdpmc));
+ return 0;
+}
+
+static void clk_audio_pll_frac_disable(struct clk_hw *hw)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PLLEN, 0);
+ /* do it in 2 separate writes */
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN, 0);
+}
+
+static void clk_audio_pll_pad_disable(struct clk_hw *hw)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PADEN, 0);
+}
+
+static void clk_audio_pll_pmc_disable(struct clk_hw *hw)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PMCEN, 0);
+}
+
+static unsigned long clk_audio_pll_fout(unsigned long parent_rate,
+ unsigned long nd, unsigned long fracr)
+{
+ unsigned long long fr = (unsigned long long)parent_rate * fracr;
+
+ pr_debug("A PLL: %s, fr = %llu\n", __func__, fr);
+
+ fr = DIV_ROUND_CLOSEST_ULL(fr, AUDIO_PLL_DIV_FRAC);
+
+ pr_debug("A PLL: %s, fr = %llu\n", __func__, fr);
+
+ return parent_rate * (nd + 1) + fr;
+}
+
+static unsigned long clk_audio_pll_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+ unsigned long fout;
+
+ fout = clk_audio_pll_fout(parent_rate, frac->nd, frac->fracr);
+
+ pr_debug("A PLL: %s, fout = %lu (nd = %u, fracr = %lu)\n", __func__,
+ fout, frac->nd, (unsigned long)frac->fracr);
+
+ return fout;
+}
+
+static unsigned long clk_audio_pll_pad_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+ unsigned long apad_rate = 0;
+
+ if (apad_ck->qdaudio && apad_ck->div)
+ apad_rate = parent_rate / (apad_ck->qdaudio * apad_ck->div);
+
+ pr_debug("A PLL/PAD: %s, apad_rate = %lu (div = %u, qdaudio = %u)\n",
+ __func__, apad_rate, apad_ck->div, apad_ck->qdaudio);
+
+ return apad_rate;
+}
+
+static unsigned long clk_audio_pll_pmc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+ unsigned long apmc_rate = 0;
+
+ apmc_rate = parent_rate / (apmc_ck->qdpmc + 1);
+
+ pr_debug("A PLL/PMC: %s, apmc_rate = %lu (qdpmc = %u)\n", __func__,
+ apmc_rate, apmc_ck->qdpmc);
+
+ return apmc_rate;
+}
+
+static int clk_audio_pll_frac_compute_frac(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned long *nd,
+ unsigned long *fracr)
+{
+ unsigned long long tmp, rem;
+
+ if (!rate)
+ return -EINVAL;
+
+ tmp = rate;
+ rem = do_div(tmp, parent_rate);
+ if (!tmp || tmp >= AUDIO_PLL_ND_MAX)
+ return -EINVAL;
+
+ *nd = tmp - 1;
+
+ tmp = rem * AUDIO_PLL_DIV_FRAC;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp, parent_rate);
+ if (tmp > AT91_PMC_AUDIO_PLL_FRACR_MASK)
+ return -EINVAL;
+
+ /* we can cast here as we verified the bounds just above */
+ *fracr = (unsigned long)tmp;
+
+ return 0;
+}
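For a concrete feel of the nd/fracr split computed above, the same arithmetic can be run in userspace. A minimal sketch, assuming a 12 MHz parent clock (a typical sama5d2 main crystal rate, which is an assumption here, not something this patch states):

/* Userspace sketch of clk_audio_pll_frac_compute_frac() above,
 * assuming a 12 MHz parent clock (an assumption, not from this patch).
 */
#include <stdio.h>

#define AUDIO_PLL_DIV_FRAC (1UL << 22)

int main(void)
{
	unsigned long parent = 12000000UL, rate = 661000000UL;
	unsigned long nd, fracr;
	unsigned long long rem, fout;

	nd = rate / parent - 1;			/* 55 - 1 = 54 */
	rem = rate % parent;			/* 1 MHz remainder */
	/* DIV_ROUND_CLOSEST_ULL(rem * 2^22, parent) */
	fracr = (unsigned long)((rem * AUDIO_PLL_DIV_FRAC + parent / 2) /
				parent);

	/* fout = parent * ((nd + 1) + fracr / 2^22), per the header comment */
	fout = (unsigned long long)parent * (nd + 1) +
	       (unsigned long long)parent * fracr / AUDIO_PLL_DIV_FRAC;

	printf("nd=%lu fracr=%lu fout=%llu\n", nd, fracr, fout);
	return 0;
}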
+
+static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long fracr, nd;
+ int ret;
+
+ pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ req->rate, req->best_parent_rate);
+
+ req->rate = clamp(req->rate, AUDIO_PLL_FOUT_MIN, AUDIO_PLL_FOUT_MAX);
+
+ req->min_rate = max(req->min_rate, AUDIO_PLL_FOUT_MIN);
+ req->max_rate = min(req->max_rate, AUDIO_PLL_FOUT_MAX);
+
+ ret = clk_audio_pll_frac_compute_frac(req->rate, req->best_parent_rate,
+ &nd, &fracr);
+ if (ret)
+ return ret;
+
+ req->rate = clk_audio_pll_fout(req->best_parent_rate, nd, fracr);
+
+ req->best_parent_hw = clk_hw_get_parent(hw);
+
+ pr_debug("A PLL: %s, best_rate = %lu (nd = %lu, fracr = %lu)\n",
+ __func__, req->rate, nd, fracr);
+
+ return 0;
+}
+
+static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *pclk = clk_hw_get_parent(hw);
+ long best_rate = -EINVAL;
+ unsigned long best_parent_rate;
+ unsigned long tmp_qd;
+ u32 div;
+ long tmp_rate;
+ int tmp_diff;
+ int best_diff = -1;
+
+ pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, *parent_rate);
+
+ /*
+ * The rate divisor is actually made of two different divisors,
+ * multiplied together before dividing the rate.
+ * tmp_qd goes from 1 to 31 and div is either 2 or 3.
+ * To avoid testing the same rate divisor twice (e.g. divisor 12 can
+ * be found with (tmp_qd, div) = (6, 2) or (4, 3)), we skip the loop
+ * iteration when div is 2 and tmp_qd is a multiple of 3.
+ * We cannot invert the condition (div is 3 and tmp_qd is even) or we
+ * would miss some rate divisors that aren't reachable with div being 2
+ * (e.g. rate divisor 90 is made with div = 3 and tmp_qd = 30, so
+ * tmp_qd is even and would be wrongly skipped, even though div 2
+ * cannot reach divisor 90 since tmp_qd has to be <= 31).
+ */
+ for (tmp_qd = 1; tmp_qd < AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX; tmp_qd++)
+ for (div = 2; div <= 3; div++) {
+ if (div == 2 && tmp_qd % 3 == 0)
+ continue;
+
+ best_parent_rate = clk_hw_round_rate(pclk,
+ rate * tmp_qd * div);
+ tmp_rate = best_parent_rate / (div * tmp_qd);
+ tmp_diff = abs(rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ *parent_rate = best_parent_rate;
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ }
+ }
+
+ pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
+ __func__, best_rate, best_parent_rate);
+
+ return best_rate;
+}
+
+static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *pclk = clk_hw_get_parent(hw);
+ long best_rate = -EINVAL;
+ unsigned long best_parent_rate = 0;
+ u32 tmp_qd = 0, div;
+ long tmp_rate;
+ int tmp_diff;
+ int best_diff = -1;
+
+ pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, *parent_rate);
+
+ for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) {
+ best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ tmp_rate = best_parent_rate / div;
+ tmp_diff = abs(rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ *parent_rate = best_parent_rate;
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ tmp_qd = div;
+ }
+ }
+
+ pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
+ __func__, best_rate, *parent_rate, tmp_qd - 1);
+
+ return best_rate;
+}
+
+static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+ unsigned long fracr, nd;
+ int ret;
+
+ pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__, rate,
+ parent_rate);
+
+ if (rate < AUDIO_PLL_FOUT_MIN || rate > AUDIO_PLL_FOUT_MAX)
+ return -EINVAL;
+
+ ret = clk_audio_pll_frac_compute_frac(rate, parent_rate, &nd, &fracr);
+ if (ret)
+ return ret;
+
+ frac->nd = nd;
+ frac->fracr = fracr;
+
+ return 0;
+}
+
+static int clk_audio_pll_pad_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+ u8 tmp_div;
+
+ pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, parent_rate);
+
+ if (!rate)
+ return -EINVAL;
+
+ tmp_div = parent_rate / rate;
+ if (tmp_div % 3 == 0) {
+ apad_ck->qdaudio = tmp_div / 3;
+ apad_ck->div = 3;
+ } else {
+ apad_ck->qdaudio = tmp_div / 2;
+ apad_ck->div = 2;
+ }
+
+ return 0;
+}
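+
+/*
+ * Example of the decomposition above: parent_rate / rate = 30 yields
+ * div = 3 and qdaudio = 10, while parent_rate / rate = 8 yields
+ * div = 2 and qdaudio = 4.
+ */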
+
+static int clk_audio_pll_pmc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ if (!rate)
+ return -EINVAL;
+
+ pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, parent_rate);
+
+ apmc_ck->qdpmc = parent_rate / rate - 1;
+
+ return 0;
+}
+
+static const struct clk_ops audio_pll_frac_ops = {
+ .enable = clk_audio_pll_frac_enable,
+ .disable = clk_audio_pll_frac_disable,
+ .recalc_rate = clk_audio_pll_frac_recalc_rate,
+ .determine_rate = clk_audio_pll_frac_determine_rate,
+ .set_rate = clk_audio_pll_frac_set_rate,
+};
+
+static const struct clk_ops audio_pll_pad_ops = {
+ .enable = clk_audio_pll_pad_enable,
+ .disable = clk_audio_pll_pad_disable,
+ .recalc_rate = clk_audio_pll_pad_recalc_rate,
+ .round_rate = clk_audio_pll_pad_round_rate,
+ .set_rate = clk_audio_pll_pad_set_rate,
+};
+
+static const struct clk_ops audio_pll_pmc_ops = {
+ .enable = clk_audio_pll_pmc_enable,
+ .disable = clk_audio_pll_pmc_disable,
+ .recalc_rate = clk_audio_pll_pmc_recalc_rate,
+ .round_rate = clk_audio_pll_pmc_round_rate,
+ .set_rate = clk_audio_pll_pmc_set_rate,
+};
+
+static int of_sama5d2_clk_audio_pll_setup(struct device_node *np,
+ struct clk_init_data *init,
+ struct clk_hw *hw,
+ struct regmap **clk_audio_regmap)
+{
+ struct regmap *regmap;
+ const char *parent_names[1];
+ int ret;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ init->name = np->name;
+ of_clk_parent_fill(np, parent_names, 1);
+ init->parent_names = parent_names;
+ init->num_parents = 1;
+
+ hw->init = init;
+ *clk_audio_regmap = regmap;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret)
+ return ret;
+
+ return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+
+static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
+{
+ struct clk_audio_frac *frac_ck;
+ struct clk_init_data init = {};
+
+ frac_ck = kzalloc(sizeof(*frac_ck), GFP_KERNEL);
+ if (!frac_ck)
+ return;
+
+ init.ops = &audio_pll_frac_ops;
+ init.flags = CLK_SET_RATE_GATE;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &frac_ck->hw,
+ &frac_ck->regmap))
+ kfree(frac_ck);
+}
+
+static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
+{
+ struct clk_audio_pad *apad_ck;
+ struct clk_init_data init = {};
+
+ apad_ck = kzalloc(sizeof(*apad_ck), GFP_KERNEL);
+ if (!apad_ck)
+ return;
+
+ init.ops = &audio_pll_pad_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &apad_ck->hw,
+ &apad_ck->regmap))
+ kfree(apad_ck);
+}
+
+static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
+{
+	struct clk_audio_pmc *apmc_ck;
+ struct clk_init_data init = {};
+
+ apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL);
+ if (!apmc_ck)
+ return;
+
+ init.ops = &audio_pll_pmc_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &apmc_ck->hw,
+ &apmc_ck->regmap))
+ kfree(apmc_ck);
+}
+
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup,
+ "atmel,sama5d2-clk-audio-pll-frac",
+ of_sama5d2_clk_audio_pll_frac_setup);
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup,
+ "atmel,sama5d2-clk-audio-pll-pad",
+ of_sama5d2_clk_audio_pll_pad_setup);
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
+ "atmel,sama5d2-clk-audio-pll-pmc",
+ of_sama5d2_clk_audio_pll_pmc_setup);
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index f0b7ae904ce2..33481368740e 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -26,6 +26,13 @@
#define GENERATED_SOURCE_MAX 6
#define GENERATED_MAX_DIV 255
+#define GCK_ID_SSC0 43
+#define GCK_ID_SSC1 44
+#define GCK_ID_I2S0 54
+#define GCK_ID_I2S1 55
+#define GCK_ID_CLASSD 59
+#define GCK_INDEX_DT_AUDIO_PLL 5
+
struct clk_generated {
struct clk_hw hw;
struct regmap *regmap;
@@ -34,6 +41,7 @@ struct clk_generated {
u32 id;
u32 gckdiv;
u8 parent_id;
+ bool audio_pll_allowed;
};
#define to_clk_generated(hw) \
@@ -99,21 +107,41 @@ clk_generated_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}
+static void clk_generated_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate, u32 div,
+ int *best_diff, long *best_rate)
+{
+ unsigned long tmp_rate;
+ int tmp_diff;
+
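+	/* A div of 0 (requested rate above parent_rate) is treated as 1 */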
+ if (!div)
+ tmp_rate = parent_rate;
+ else
+ tmp_rate = parent_rate / div;
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff > tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
static int clk_generated_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_generated *gck = to_clk_generated(hw);
struct clk_hw *parent = NULL;
+ struct clk_rate_request req_parent = *req;
long best_rate = -EINVAL;
- unsigned long tmp_rate, min_rate;
+ unsigned long min_rate, parent_rate;
int best_diff = -1;
- int tmp_diff;
int i;
+ u32 div;
- for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
- u32 div;
- unsigned long parent_rate;
-
+ for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
@@ -124,25 +152,43 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
(gck->range.max && min_rate > gck->range.max))
continue;
- for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
- tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div);
- tmp_diff = abs(req->rate - tmp_rate);
+ div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
- if (best_diff < 0 || best_diff > tmp_diff) {
- best_rate = tmp_rate;
- best_diff = tmp_diff;
- req->best_parent_rate = parent_rate;
- req->best_parent_hw = parent;
- }
+ clk_generated_best_diff(req, parent, parent_rate, div,
+ &best_diff, &best_rate);
- if (!best_diff || tmp_rate < req->rate)
- break;
- }
+ if (!best_diff)
+ break;
+ }
+
+	/*
+	 * The audio_pll rate can be modified, unlike the five other clocks
+	 * that should never be altered.
+	 * The audio_pll can technically be used by multiple consumers.
+	 * However, with the rate locking, the first consumer to enable the
+	 * clock will be the one that definitively sets its rate.
+	 * Since audio IPs are most likely to request the same rate, we
+	 * enforce that the only clks able to modify the gck rate are those
+	 * of audio IPs.
+	 */
+
+ if (!gck->audio_pll_allowed)
+ goto end;
+
+ parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
+ if (!parent)
+ goto end;
+
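+	/*
+	 * For instance, a 12288000 Hz request asks the audio PLL (through
+	 * __clk_determine_rate()) for 12288000 * div with div = 1..256 and
+	 * keeps the divisor that gives the closest match.
+	 */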
+ for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
+ req_parent.rate = req->rate * div;
+ __clk_determine_rate(parent, &req_parent);
+ clk_generated_best_diff(req, parent, req_parent.rate, div,
+ &best_diff, &best_rate);
if (!best_diff)
break;
}
+end:
pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
__func__, best_rate,
__clk_get_name((req->best_parent_hw)->clk),
@@ -252,7 +298,8 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
init.ops = &generated_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
gck->id = id;
gck->hw.init = &init;
@@ -284,6 +331,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
struct device_node *gcknp;
struct clk_range range = CLK_RANGE(0, 0);
struct regmap *regmap;
+ struct clk_generated *gck;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
@@ -315,6 +363,21 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
parent_names, num_parents,
id, &range);
+
+		/* hw may be an error pointer; only use it once valid */
+		if (!IS_ERR(hw)) {
+			gck = to_clk_generated(hw);
+
+			if (of_device_is_compatible(np,
+					"atmel,sama5d2-clk-generated") &&
+			    (gck->id == GCK_ID_SSC0 ||
+			     gck->id == GCK_ID_SSC1 ||
+			     gck->id == GCK_ID_I2S0 ||
+			     gck->id == GCK_ID_I2S1 ||
+			     gck->id == GCK_ID_CLASSD))
+				gck->audio_pll_allowed = true;
+			else
+				gck->audio_pll_allowed = false;
+		}
+
if (IS_ERR(hw))
continue;
diff --git a/drivers/clk/axs10x/Makefile b/drivers/clk/axs10x/Makefile
index 01996b871b06..d747deafbf1e 100644
--- a/drivers/clk/axs10x/Makefile
+++ b/drivers/clk/axs10x/Makefile
@@ -1 +1,2 @@
obj-y += i2s_pll_clock.o
+obj-y += pll_clock.o
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
new file mode 100644
index 000000000000..25d8c240ddfb
--- /dev/null
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -0,0 +1,346 @@
+/*
+ * Synopsys AXS10X SDP Generic PLL clock driver
+ *
+ * Copyright (C) 2017 Synopsys
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+/* PLL registers addresses */
+#define PLL_REG_IDIV 0x0
+#define PLL_REG_FBDIV 0x4
+#define PLL_REG_ODIV 0x8
+
+/*
+ * Bit fields of the PLL IDIV/FBDIV/ODIV registers:
+ *  ________________________________________________________________________
+ * |31                15|    14    |   13   |  12  |11         6|5         0|
+ * |------RESERVED------|-NOUPDATE-|-BYPASS-|-EDGE-|--HIGHTIME--|--LOWTIME--|
+ * |____________________|__________|________|______|____________|___________|
+ *
+ * The following macros define how these registers are accessed; the
+ * fields should only be manipulated through them.
+ * reg must be a u32 variable.
+ */
+
+#define PLL_REG_GET_LOW(reg) \
+ (((reg) & (0x3F << 0)) >> 0)
+#define PLL_REG_GET_HIGH(reg) \
+ (((reg) & (0x3F << 6)) >> 6)
+#define PLL_REG_GET_EDGE(reg) \
+ (((reg) & (BIT(12))) ? 1 : 0)
+#define PLL_REG_GET_BYPASS(reg) \
+ (((reg) & (BIT(13))) ? 1 : 0)
+#define PLL_REG_GET_NOUPD(reg) \
+ (((reg) & (BIT(14))) ? 1 : 0)
+#define PLL_REG_GET_PAD(reg) \
+ (((reg) & (0x1FFFF << 15)) >> 15)
+
+#define PLL_REG_SET_LOW(reg, value) \
+ { reg |= (((value) & 0x3F) << 0); }
+#define PLL_REG_SET_HIGH(reg, value) \
+ { reg |= (((value) & 0x3F) << 6); }
+#define PLL_REG_SET_EDGE(reg, value) \
+ { reg |= (((value) & 0x01) << 12); }
+#define PLL_REG_SET_BYPASS(reg, value) \
+ { reg |= (((value) & 0x01) << 13); }
+#define PLL_REG_SET_NOUPD(reg, value) \
+ { reg |= (((value) & 0x01) << 14); }
+#define PLL_REG_SET_PAD(reg, value) \
+ { reg |= (((value) & 0x1FFFF) << 15); }
+
+#define PLL_LOCK BIT(0)
+#define PLL_ERROR BIT(1)
+#define PLL_MAX_LOCK_TIME 100 /* 100 us */
+
+struct axs10x_pll_cfg {
+ u32 rate;
+ u32 idiv;
+ u32 fbdiv;
+ u32 odiv;
+};
+
+static const struct axs10x_pll_cfg arc_pll_cfg[] = {
+ { 33333333, 1, 1, 1 },
+ { 50000000, 1, 30, 20 },
+ { 75000000, 2, 45, 10 },
+ { 90000000, 2, 54, 10 },
+ { 100000000, 1, 30, 10 },
+ { 125000000, 2, 45, 6 },
+ {}
+};
+
+static const struct axs10x_pll_cfg pgu_pll_cfg[] = {
+ { 25200000, 1, 84, 90 },
+ { 50000000, 1, 100, 54 },
+ { 74250000, 1, 44, 16 },
+ {}
+};
+
+struct axs10x_pll_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+ void __iomem *lock;
+ const struct axs10x_pll_cfg *pll_cfg;
+ struct device *dev;
+};
+
+static inline void axs10x_pll_write(struct axs10x_pll_clk *clk, u32 reg,
+ u32 val)
+{
+ iowrite32(val, clk->base + reg);
+}
+
+static inline u32 axs10x_pll_read(struct axs10x_pll_clk *clk, u32 reg)
+{
+ return ioread32(clk->base + reg);
+}
+
+static inline struct axs10x_pll_clk *to_axs10x_pll_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct axs10x_pll_clk, hw);
+}
+
+static inline u32 axs10x_div_get_value(u32 reg)
+{
+ if (PLL_REG_GET_BYPASS(reg))
+ return 1;
+
+ return PLL_REG_GET_HIGH(reg) + PLL_REG_GET_LOW(reg);
+}
+
+static inline u32 axs10x_encode_div(unsigned int id, int upd)
+{
+ u32 div = 0;
+
+ PLL_REG_SET_LOW(div, (id % 2 == 0) ? id >> 1 : (id >> 1) + 1);
+ PLL_REG_SET_HIGH(div, id >> 1);
+ PLL_REG_SET_EDGE(div, id % 2);
+ PLL_REG_SET_BYPASS(div, id == 1 ? 1 : 0);
+ PLL_REG_SET_NOUPD(div, upd == 0 ? 1 : 0);
+
+ return div;
+}
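+
+/*
+ * Encoding examples: id = 4 gives LOWTIME = 2, HIGHTIME = 2, EDGE = 0;
+ * id = 5 gives LOWTIME = 3, HIGHTIME = 2, EDGE = 1; id = 1 simply sets
+ * BYPASS.  axs10x_div_get_value() recovers the divisor as
+ * HIGHTIME + LOWTIME.
+ */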
+
+static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u64 rate;
+ u32 idiv, fbdiv, odiv;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+
+ idiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_IDIV));
+ fbdiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_FBDIV));
+ odiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_ODIV));
+
+ rate = (u64)parent_rate * fbdiv;
+ do_div(rate, idiv * odiv);
+
+ return rate;
+}
+
+static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+ long best_rate;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+ const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;
+
+ if (pll_cfg[0].rate == 0)
+ return -EINVAL;
+
+ best_rate = pll_cfg[0].rate;
+
+ for (i = 1; pll_cfg[i].rate != 0; i++) {
+ if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ best_rate = pll_cfg[i].rate;
+ }
+
+ return best_rate;
+}
+
+static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+ const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;
+
+ for (i = 0; pll_cfg[i].rate != 0; i++) {
+ if (pll_cfg[i].rate == rate) {
+ axs10x_pll_write(clk, PLL_REG_IDIV,
+ axs10x_encode_div(pll_cfg[i].idiv, 0));
+ axs10x_pll_write(clk, PLL_REG_FBDIV,
+ axs10x_encode_div(pll_cfg[i].fbdiv, 0));
+ axs10x_pll_write(clk, PLL_REG_ODIV,
+ axs10x_encode_div(pll_cfg[i].odiv, 1));
+
+			/*
+			 * Wait until the CGU relocks and check the error
+			 * status. If the CGU is still unlocked after the
+			 * timeout, return an error.
+			 */
+ udelay(PLL_MAX_LOCK_TIME);
+ if (!(ioread32(clk->lock) & PLL_LOCK))
+ return -ETIMEDOUT;
+
+ if (ioread32(clk->lock) & PLL_ERROR)
+ return -EINVAL;
+
+ return 0;
+ }
+ }
+
+	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
+		parent_rate);
+ return -EINVAL;
+}
+
+static const struct clk_ops axs10x_pll_ops = {
+ .recalc_rate = axs10x_pll_recalc_rate,
+ .round_rate = axs10x_pll_round_rate,
+ .set_rate = axs10x_pll_set_rate,
+};
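+
+/*
+ * A consumer is expected to pick one of the table rates through the
+ * generic clk API, e.g. (sketch):
+ *
+ *	rate = clk_round_rate(clk, 98000000);	(returns 100000000 with
+ *						 the arc_pll_cfg table)
+ *	ret = clk_set_rate(clk, rate);
+ */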
+
+static int axs10x_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const char *parent_name;
+ struct axs10x_pll_clk *pll_clk;
+ struct resource *mem;
+ struct clk_init_data init = { };
+ int ret;
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_clk->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->base))
+ return PTR_ERR(pll_clk->base);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pll_clk->lock = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->lock))
+ return PTR_ERR(pll_clk->lock);
+
+ init.name = dev->of_node->name;
+ init.ops = &axs10x_pll_ops;
+ parent_name = of_clk_get_parent_name(dev->of_node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ pll_clk->hw.init = &init;
+ pll_clk->dev = dev;
+ pll_clk->pll_cfg = of_device_get_match_data(dev);
+
+ if (!pll_clk->pll_cfg) {
+ dev_err(dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n", init.name);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
+ &pll_clk->hw);
+}
+
+static int axs10x_pll_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static void __init of_axs10x_pll_clk_setup(struct device_node *node)
+{
+ const char *parent_name;
+ struct axs10x_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+ int ret;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return;
+
+ pll_clk->base = of_iomap(node, 0);
+ if (!pll_clk->base) {
+ pr_err("failed to map pll div registers\n");
+ goto err_free_pll_clk;
+ }
+
+ pll_clk->lock = of_iomap(node, 1);
+ if (!pll_clk->lock) {
+ pr_err("failed to map pll lock register\n");
+ goto err_unmap_base;
+ }
+
+ init.name = node->name;
+ init.ops = &axs10x_pll_ops;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = parent_name ? 1 : 0;
+ pll_clk->hw.init = &init;
+ pll_clk->pll_cfg = arc_pll_cfg;
+
+ ret = clk_hw_register(NULL, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to register %s clock\n", node->name);
+ goto err_unmap_lock;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to add hw provider for %s clock\n", node->name);
+ goto err_unregister_clk;
+ }
+
+ return;
+
+err_unregister_clk:
+ clk_hw_unregister(&pll_clk->hw);
+err_unmap_lock:
+ iounmap(pll_clk->lock);
+err_unmap_base:
+ iounmap(pll_clk->base);
+err_free_pll_clk:
+ kfree(pll_clk);
+}
+CLK_OF_DECLARE(axs10x_pll_clock, "snps,axs10x-arc-pll-clock",
+ of_axs10x_pll_clk_setup);
+
+static const struct of_device_id axs10x_pll_clk_id[] = {
+ { .compatible = "snps,axs10x-pgu-pll-clock", .data = &pgu_pll_cfg},
+ { }
+};
+MODULE_DEVICE_TABLE(of, axs10x_pll_clk_id);
+
+static struct platform_driver axs10x_pll_clk_driver = {
+ .driver = {
+ .name = "axs10x-pll-clock",
+ .of_match_table = axs10x_pll_clk_id,
+ },
+ .probe = axs10x_pll_clk_probe,
+ .remove = axs10x_pll_clk_remove,
+};
+builtin_platform_driver(axs10x_pll_clk_driver);
+
+MODULE_AUTHOR("Vlad Zakharov <vzakhar@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys AXS10X SDP Generic PLL Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 1d99292e2039..e7331ace0337 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -679,8 +679,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n", np, n);
goto bg2_fail;
}
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 3b784b593afd..67c270b143f7 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -304,14 +304,14 @@ static void __init berlin2q_clock_setup(struct device_node *np)
gbase = of_iomap(parent_np, 0);
if (!gbase) {
- pr_err("%s: Unable to map global base\n", np->full_name);
+ pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
if (!cpupll_base) {
- pr_err("%s: Unable to map cpupll base\n", np->full_name);
+ pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
return;
}
@@ -376,8 +376,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n", np, n);
goto bg2q_fail;
}
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index ea8568536193..bf0582cbbf38 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -338,8 +338,8 @@ static void __init asm9260_acc_init(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n",
+ np, n);
goto fail;
}
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 7ec36722f8ab..49819b546134 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -23,8 +23,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
"#clock-cells");
if (num_parents == -EINVAL)
- pr_err("clk: invalid value of clock-parents property at %s\n",
- node->full_name);
+ pr_err("clk: invalid value of clock-parents property at %pOF\n",
+ node);
for (index = 0; index < num_parents; index++) {
rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
@@ -41,8 +41,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
pclk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get parent clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get parent clock %d for %pOF\n",
+ index, node);
return PTR_ERR(pclk);
}
@@ -57,8 +57,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get assigned clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
+ index, node);
rc = PTR_ERR(clk);
goto err;
}
@@ -102,8 +102,8 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get clock %d for %pOF\n",
+ index, node);
return PTR_ERR(clk);
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index c54baede4d68..e8ea81c30f0c 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -343,6 +343,15 @@ static int cs2000_set_rate(struct clk_hw *hw,
return __cs2000_set_rate(priv, ch, rate, parent_rate);
}
+static int cs2000_set_saved_rate(struct cs2000_priv *priv)
+{
+ int ch = 0; /* it uses ch0 only at this point */
+
+ return __cs2000_set_rate(priv, ch,
+ priv->saved_rate,
+ priv->saved_parent_rate);
+}
+
static int cs2000_enable(struct clk_hw *hw)
{
struct cs2000_priv *priv = hw_to_priv(hw);
@@ -535,11 +544,8 @@ probe_err:
static int cs2000_resume(struct device *dev)
{
struct cs2000_priv *priv = dev_get_drvdata(dev);
- int ch = 0; /* it uses ch0 only at this point */
- return __cs2000_set_rate(priv, ch,
- priv->saved_rate,
- priv->saved_parent_rate);
+ return cs2000_set_saved_rate(priv);
}
static const struct dev_pm_ops cs2000_pm_ops = {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 9bb472cccca6..4ed516cb7276 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -385,12 +385,14 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned int value;
+ int value;
unsigned long flags = 0;
u32 val;
value = divider_get_val(rate, parent_rate, divider->table,
divider->width, divider->flags);
+ if (value < 0)
+ return value;
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
@@ -403,7 +405,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
val = clk_readl(divider->reg);
val &= ~(div_mask(divider->width) << divider->shift);
}
- val |= value << divider->shift;
+ val |= (u32)value << divider->shift;
clk_writel(val, divider->reg);
if (divider->lock)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index aab904618eb6..fdf625fb10fa 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -49,16 +49,12 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long scale;
- unsigned long m, n;
- u64 ret;
-
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
/*
* Get rate closer to *parent_rate to guarantee there is no overflow
@@ -71,7 +67,23 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
rational_best_approximation(rate, *parent_rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
- &m, &n);
+ m, n);
+}
+
+static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_fractional_divider *fd = to_clk_fd(hw);
+ unsigned long m, n;
+ u64 ret;
+
+ if (!rate || rate >= *parent_rate)
+ return *parent_rate;
+
+ if (fd->approximation)
+ fd->approximation(hw, rate, parent_rate, &m, &n);
+ else
+ clk_fd_general_approximation(hw, rate, parent_rate, &m, &n);
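+
+	/*
+	 * Example: rate = 19200000 with *parent_rate = 120000000 reduces
+	 * to m/n = 4/25 (assuming the field widths can hold these values),
+	 * so the code below rounds to 120000000 * 4 / 25 = 19200000.
+	 */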
ret = (u64)*parent_rate * m;
do_div(ret, n);
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4e0c054a787c..dd82485e09a1 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -86,7 +86,7 @@ static void clk_gate_disable(struct clk_hw *hw)
clk_gate_endisable(hw, 0);
}
-static int clk_gate_is_enabled(struct clk_hw *hw)
+int clk_gate_is_enabled(struct clk_hw *hw)
{
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
@@ -101,6 +101,7 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
+EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index b4cf2f699a21..f940e5af845b 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(gemini_clk_lock);
#define GEMINI_GLOBAL_MISC_CONTROL 0x30
#define PCI_CLK_66MHZ BIT(18)
-#define PCI_CLK_OE BIT(17)
#define GEMINI_GLOBAL_CLOCK_CONTROL 0x34
#define PCI_CLKRUN_EN BIT(16)
@@ -159,9 +158,6 @@ static int gemini_pci_enable(struct clk_hw *hw)
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
0, PCI_CLKRUN_EN);
- regmap_update_bits(pciclk->map,
- GEMINI_GLOBAL_MISC_CONTROL,
- 0, PCI_CLK_OE);
return 0;
}
@@ -169,9 +165,6 @@ static void gemini_pci_disable(struct clk_hw *hw)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
- regmap_update_bits(pciclk->map,
- GEMINI_GLOBAL_MISC_CONTROL,
- PCI_CLK_OE, 0);
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
PCI_CLKRUN_EN, 0);
}
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
new file mode 100644
index 000000000000..bbf237173b37
--- /dev/null
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -0,0 +1,431 @@
+/*
+ * Synopsys HSDK SDP Generic PLL clock driver
+ *
+ * Copyright (C) 2017 Synopsys
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
+#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
+#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
+#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */
+
+#define CGU_PLL_CTRL_ODIV_SHIFT 2
+#define CGU_PLL_CTRL_IDIV_SHIFT 4
+#define CGU_PLL_CTRL_FBDIV_SHIFT 9
+#define CGU_PLL_CTRL_BAND_SHIFT 20
+
+#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
+#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
+#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)
+
+#define CGU_PLL_CTRL_PD BIT(0)
+#define CGU_PLL_CTRL_BYPASS BIT(1)
+
+#define CGU_PLL_STATUS_LOCK BIT(0)
+#define CGU_PLL_STATUS_ERR BIT(1)
+
+#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */
+
+#define CGU_PLL_SOURCE_MAX 1
+
+#define CORE_IF_CLK_THRESHOLD_HZ 500000000
+#define CREG_CORE_IF_CLK_DIV_1 0x0
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+
+struct hsdk_pll_cfg {
+ u32 rate;
+ u32 idiv;
+ u32 fbdiv;
+ u32 odiv;
+ u32 band;
+};
+
+static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
+ { 100000000, 0, 11, 3, 0 },
+ { 133000000, 0, 15, 3, 0 },
+ { 200000000, 1, 47, 3, 0 },
+ { 233000000, 1, 27, 2, 0 },
+ { 300000000, 1, 35, 2, 0 },
+ { 333000000, 1, 39, 2, 0 },
+ { 400000000, 1, 47, 2, 0 },
+ { 500000000, 0, 14, 1, 0 },
+ { 600000000, 0, 17, 1, 0 },
+ { 700000000, 0, 20, 1, 0 },
+ { 800000000, 0, 23, 1, 0 },
+ { 900000000, 1, 26, 0, 0 },
+ { 1000000000, 1, 29, 0, 0 },
+ { 1100000000, 1, 32, 0, 0 },
+ { 1200000000, 1, 35, 0, 0 },
+ { 1300000000, 1, 38, 0, 0 },
+ { 1400000000, 1, 41, 0, 0 },
+ { 1500000000, 1, 44, 0, 0 },
+ { 1600000000, 1, 47, 0, 0 },
+ {}
+};
+
+static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
+ { 297000000, 0, 21, 2, 0 },
+ { 540000000, 0, 19, 1, 0 },
+ { 594000000, 0, 21, 1, 0 },
+ {}
+};
+
+struct hsdk_pll_clk {
+ struct clk_hw hw;
+ void __iomem *regs;
+ void __iomem *spec_regs;
+ const struct hsdk_pll_devdata *pll_devdata;
+ struct device *dev;
+};
+
+struct hsdk_pll_devdata {
+ const struct hsdk_pll_cfg *pll_cfg;
+ int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
+ const struct hsdk_pll_cfg *cfg);
+};
+
+static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
+ const struct hsdk_pll_cfg *);
+static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
+ const struct hsdk_pll_cfg *);
+
+static const struct hsdk_pll_devdata core_pll_devdata = {
+ .pll_cfg = asdt_pll_cfg,
+ .update_rate = hsdk_pll_core_update_rate,
+};
+
+static const struct hsdk_pll_devdata sdt_pll_devdata = {
+ .pll_cfg = asdt_pll_cfg,
+ .update_rate = hsdk_pll_comm_update_rate,
+};
+
+static const struct hsdk_pll_devdata hdmi_pll_devdata = {
+ .pll_cfg = hdmi_pll_cfg,
+ .update_rate = hsdk_pll_comm_update_rate,
+};
+
+static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
+{
+ iowrite32(val, clk->regs + reg);
+}
+
+static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
+{
+ return ioread32(clk->regs + reg);
+}
+
+static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
+ const struct hsdk_pll_cfg *cfg)
+{
+ u32 val = 0;
+
+ /* Powerdown and Bypass bits should be cleared */
+ val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
+ val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
+ val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
+ val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+
+	dev_dbg(clk->dev, "write configuration: %#x\n", val);
+
+ hsdk_pll_write(clk, CGU_PLL_CTRL, val);
+}
+
+static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
+{
+ return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
+}
+
+static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
+{
+ return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
+}
+
+static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct hsdk_pll_clk, hw);
+}
+
+static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 val;
+ u64 rate;
+ u32 idiv, fbdiv, odiv;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+
+ val = hsdk_pll_read(clk, CGU_PLL_CTRL);
+
+	dev_dbg(clk->dev, "current configuration: %#x\n", val);
+
+ /* Check if PLL is disabled */
+ if (val & CGU_PLL_CTRL_PD)
+ return 0;
+
+ /* Check if PLL is bypassed */
+ if (val & CGU_PLL_CTRL_BYPASS)
+ return parent_rate;
+
+ /* input divider = reg.idiv + 1 */
+ idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
+ /* fb divider = 2*(reg.fbdiv + 1) */
+ fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
+ /* output divider = 2^(reg.odiv) */
+ odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
+
+ rate = (u64)parent_rate * fbdiv;
+ do_div(rate, idiv * odiv);
+
+ return rate;
+}
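+
+/*
+ * For example, assuming the 33.33 MHz input clock used on the HSDK
+ * board, the { 1000000000, 1, 29, 0, 0 } table entry above decodes to
+ * idiv = 2, fbdiv = 60 and odiv = 1, i.e.
+ * 33333333 Hz * 60 / 2 = 999999990 Hz (~1 GHz).
+ */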
+
+static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+ unsigned long best_rate;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+ const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
+
+ if (pll_cfg[0].rate == 0)
+ return -EINVAL;
+
+ best_rate = pll_cfg[0].rate;
+
+ for (i = 1; pll_cfg[i].rate != 0; i++) {
+ if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ best_rate = pll_cfg[i].rate;
+ }
+
+ dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
+
+ return best_rate;
+}
+
+static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
+ unsigned long rate,
+ const struct hsdk_pll_cfg *cfg)
+{
+ hsdk_pll_set_cfg(clk, cfg);
+
+	/*
+	 * Wait until the CGU relocks and check the error status.
+	 * If the CGU is still unlocked after the timeout, return an error.
+	 */
+ udelay(HSDK_PLL_MAX_LOCK_TIME);
+ if (!hsdk_pll_is_locked(clk))
+ return -ETIMEDOUT;
+
+ if (hsdk_pll_is_err(clk))
+ return -EINVAL;
+
+ return 0;
+}
+
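+/*
+ * Core PLL variant: the interface clock divider in the CREG block must
+ * not let the interface clock exceed 500 MHz, so it is switched to
+ * div-by-2 before raising the core clock above the threshold, and back
+ * to div-by-1 only after a successful switch below it.
+ */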
+static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
+ unsigned long rate,
+ const struct hsdk_pll_cfg *cfg)
+{
+ /*
+ * When core clock exceeds 500MHz, the divider for the interface
+ * clock must be programmed to div-by-2.
+ */
+ if (rate > CORE_IF_CLK_THRESHOLD_HZ)
+ iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
+
+ hsdk_pll_set_cfg(clk, cfg);
+
+	/*
+	 * Wait until the CGU relocks and check the error status.
+	 * If the CGU is still unlocked after the timeout, return an error.
+	 */
+ udelay(HSDK_PLL_MAX_LOCK_TIME);
+ if (!hsdk_pll_is_locked(clk))
+ return -ETIMEDOUT;
+
+ if (hsdk_pll_is_err(clk))
+ return -EINVAL;
+
+	/*
+	 * Program the divider back to div-by-1 if we successfully set the
+	 * core clock below the 500 MHz threshold.
+	 */
+ if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
+ iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
+
+ return 0;
+}
+
+static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+ const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
+
+ for (i = 0; pll_cfg[i].rate != 0; i++) {
+ if (pll_cfg[i].rate == rate) {
+ return clk->pll_devdata->update_rate(clk, rate,
+ &pll_cfg[i]);
+ }
+ }
+
+	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
+		parent_rate);
+
+ return -EINVAL;
+}
+
+static const struct clk_ops hsdk_pll_ops = {
+ .recalc_rate = hsdk_pll_recalc_rate,
+ .round_rate = hsdk_pll_round_rate,
+ .set_rate = hsdk_pll_set_rate,
+};
+
+static int hsdk_pll_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem;
+ const char *parent_name;
+ unsigned int num_parents;
+ struct hsdk_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+ struct device *dev = &pdev->dev;
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_clk->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->regs))
+ return PTR_ERR(pll_clk->regs);
+
+ init.name = dev->of_node->name;
+ init.ops = &hsdk_pll_ops;
+ parent_name = of_clk_get_parent_name(dev->of_node, 0);
+ init.parent_names = &parent_name;
+ num_parents = of_clk_get_parent_count(dev->of_node);
+ if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
+		dev_err(dev, "invalid number of clock parents: %u\n",
+			num_parents);
+ return -EINVAL;
+ }
+ init.num_parents = num_parents;
+
+ pll_clk->hw.init = &init;
+ pll_clk->dev = dev;
+ pll_clk->pll_devdata = of_device_get_match_data(dev);
+
+ if (!pll_clk->pll_devdata) {
+ dev_err(dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n", init.name);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
+ &pll_clk->hw);
+}
+
+static int hsdk_pll_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static void __init of_hsdk_pll_clk_setup(struct device_node *node)
+{
+ int ret;
+ const char *parent_name;
+ unsigned int num_parents;
+ struct hsdk_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return;
+
+ pll_clk->regs = of_iomap(node, 0);
+ if (!pll_clk->regs) {
+ pr_err("failed to map pll registers\n");
+ goto err_free_pll_clk;
+ }
+
+ pll_clk->spec_regs = of_iomap(node, 1);
+ if (!pll_clk->spec_regs) {
+		pr_err("failed to map pll special registers\n");
+ goto err_unmap_comm_regs;
+ }
+
+ init.name = node->name;
+ init.ops = &hsdk_pll_ops;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents > CGU_PLL_SOURCE_MAX) {
+		pr_err("too many clock parents: %u\n", num_parents);
+ goto err_unmap_spec_regs;
+ }
+ init.num_parents = num_parents;
+
+ pll_clk->hw.init = &init;
+ pll_clk->pll_devdata = &core_pll_devdata;
+
+ ret = clk_hw_register(NULL, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to register %s clock\n", node->name);
+ goto err_unmap_spec_regs;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to add hw provider for %s clock\n", node->name);
+ goto err_unmap_spec_regs;
+ }
+
+ return;
+
+err_unmap_spec_regs:
+ iounmap(pll_clk->spec_regs);
+err_unmap_comm_regs:
+ iounmap(pll_clk->regs);
+err_free_pll_clk:
+ kfree(pll_clk);
+}
+
+/* Core PLL is needed early for the ARC CPU timers */
+CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
+	       of_hsdk_pll_clk_setup);
+
+static const struct of_device_id hsdk_pll_clk_id[] = {
+ { .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
+ { .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
+ { }
+};
+
+static struct platform_driver hsdk_pll_clk_driver = {
+ .driver = {
+ .name = "hsdk-gp-pll-clock",
+ .of_match_table = hsdk_pll_clk_id,
+ },
+ .probe = hsdk_pll_clk_probe,
+ .remove = hsdk_pll_clk_remove,
+};
+builtin_platform_driver(hsdk_pll_clk_driver);
diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c
deleted file mode 100644
index 2a83a3ff1d09..000000000000
--- a/drivers/clk/clk-mb86s7x.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (C) 2013-2015 FUJITSU SEMICONDUCTOR LIMITED
- * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clkdev.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/clk-provider.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/topology.h>
-#include <linux/mailbox_client.h>
-#include <linux/platform_device.h>
-
-#include <soc/mb86s7x/scb_mhu.h>
-
-#define to_crg_clk(p) container_of(p, struct crg_clk, hw)
-#define to_clc_clk(p) container_of(p, struct cl_clk, hw)
-
-struct mb86s7x_peri_clk {
- u32 payload_size;
- u32 cntrlr;
- u32 domain;
- u32 port;
- u32 en;
- u64 frequency;
-} __packed __aligned(4);
-
-struct hack_rate {
- unsigned clk_id;
- unsigned long rate;
- int gated;
-};
-
-struct crg_clk {
- struct clk_hw hw;
- u8 cntrlr, domain, port;
-};
-
-static int crg_gate_control(struct clk_hw *hw, int en)
-{
- struct crg_clk *crgclk = to_crg_clk(hw);
- struct mb86s7x_peri_clk cmd;
- int ret;
-
- cmd.payload_size = sizeof(cmd);
- cmd.cntrlr = crgclk->cntrlr;
- cmd.domain = crgclk->domain;
- cmd.port = crgclk->port;
- cmd.en = en;
-
- /* Port is UngatedCLK */
- if (cmd.port == 8)
- return en ? 0 : -EINVAL;
-
- pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u En-%u}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port, cmd.en);
-
- ret = mb86s7x_send_packet(CMD_PERI_CLOCK_GATE_SET_REQ,
- &cmd, sizeof(cmd));
- if (ret < 0) {
- pr_err("%s:%d failed!\n", __func__, __LINE__);
- return ret;
- }
-
- pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u En-%u}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port, cmd.en);
-
- /* If the request was rejected */
- if (cmd.en != en)
- ret = -EINVAL;
- else
- ret = 0;
-
- return ret;
-}
-
-static int crg_port_prepare(struct clk_hw *hw)
-{
- return crg_gate_control(hw, 1);
-}
-
-static void crg_port_unprepare(struct clk_hw *hw)
-{
- crg_gate_control(hw, 0);
-}
-
-static int
-crg_rate_control(struct clk_hw *hw, int set, unsigned long *rate)
-{
- struct crg_clk *crgclk = to_crg_clk(hw);
- struct mb86s7x_peri_clk cmd;
- int code, ret;
-
- cmd.payload_size = sizeof(cmd);
- cmd.cntrlr = crgclk->cntrlr;
- cmd.domain = crgclk->domain;
- cmd.port = crgclk->port;
- cmd.frequency = *rate;
-
- if (set) {
- code = CMD_PERI_CLOCK_RATE_SET_REQ;
- pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port, cmd.frequency);
- } else {
- code = CMD_PERI_CLOCK_RATE_GET_REQ;
- pr_debug("%s:%d CMD Cntrlr-%u Dom-%u Port-%u Rate-GET}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port);
- }
-
- ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd));
- if (ret < 0) {
- pr_err("%s:%d failed!\n", __func__, __LINE__);
- return ret;
- }
-
- if (set)
- pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-SET %lluHz}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port, cmd.frequency);
- else
- pr_debug("%s:%d REP Cntrlr-%u Dom-%u Port-%u Rate-GOT %lluHz}\n",
- __func__, __LINE__, cmd.cntrlr,
- cmd.domain, cmd.port, cmd.frequency);
-
- *rate = cmd.frequency;
- return 0;
-}
-
-static unsigned long
-crg_port_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- unsigned long rate;
-
- crg_rate_control(hw, 0, &rate);
-
- return rate;
-}
-
-static long
-crg_port_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *pr)
-{
- return rate;
-}
-
-static int
-crg_port_set_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long parent_rate)
-{
- return crg_rate_control(hw, 1, &rate);
-}
-
-const struct clk_ops crg_port_ops = {
- .prepare = crg_port_prepare,
- .unprepare = crg_port_unprepare,
- .recalc_rate = crg_port_recalc_rate,
- .round_rate = crg_port_round_rate,
- .set_rate = crg_port_set_rate,
-};
-
-struct mb86s70_crg11 {
- struct mutex lock; /* protects CLK populating and searching */
-};
-
-static struct clk *crg11_get(struct of_phandle_args *clkspec, void *data)
-{
- struct mb86s70_crg11 *crg11 = data;
- struct clk_init_data init;
- u32 cntrlr, domain, port;
- struct crg_clk *crgclk;
- struct clk *clk;
- char clkp[20];
-
- if (clkspec->args_count != 3)
- return ERR_PTR(-EINVAL);
-
- cntrlr = clkspec->args[0];
- domain = clkspec->args[1];
- port = clkspec->args[2];
-
- if (port > 7)
- snprintf(clkp, 20, "UngatedCLK%d_%X", cntrlr, domain);
- else
- snprintf(clkp, 20, "CLK%d_%X_%d", cntrlr, domain, port);
-
- mutex_lock(&crg11->lock);
-
- clk = __clk_lookup(clkp);
- if (clk) {
- mutex_unlock(&crg11->lock);
- return clk;
- }
-
- crgclk = kzalloc(sizeof(*crgclk), GFP_KERNEL);
- if (!crgclk) {
- mutex_unlock(&crg11->lock);
- return ERR_PTR(-ENOMEM);
- }
-
- init.name = clkp;
- init.num_parents = 0;
- init.ops = &crg_port_ops;
- init.flags = 0;
- crgclk->hw.init = &init;
- crgclk->cntrlr = cntrlr;
- crgclk->domain = domain;
- crgclk->port = port;
- clk = clk_register(NULL, &crgclk->hw);
- if (IS_ERR(clk))
- pr_err("%s:%d Error!\n", __func__, __LINE__);
- else
- pr_debug("Registered %s\n", clkp);
-
- clk_register_clkdev(clk, clkp, NULL);
- mutex_unlock(&crg11->lock);
- return clk;
-}
-
-static void __init crg_port_init(struct device_node *node)
-{
- struct mb86s70_crg11 *crg11;
-
- crg11 = kzalloc(sizeof(*crg11), GFP_KERNEL);
- if (!crg11)
- return;
-
- mutex_init(&crg11->lock);
-
- of_clk_add_provider(node, crg11_get, crg11);
-}
-CLK_OF_DECLARE(crg11_gate, "fujitsu,mb86s70-crg11", crg_port_init);
-
-struct cl_clk {
- struct clk_hw hw;
- int cluster;
-};
-
-struct mb86s7x_cpu_freq {
- u32 payload_size;
- u32 cluster_class;
- u32 cluster_id;
- u32 cpu_id;
- u64 frequency;
-};
-
-static void mhu_cluster_rate(struct clk_hw *hw, unsigned long *rate, int get)
-{
- struct cl_clk *clc = to_clc_clk(hw);
- struct mb86s7x_cpu_freq cmd;
- int code, ret;
-
- cmd.payload_size = sizeof(cmd);
- cmd.cluster_class = 0;
- cmd.cluster_id = clc->cluster;
- cmd.cpu_id = 0;
- cmd.frequency = *rate;
-
- if (get)
- code = CMD_CPU_CLOCK_RATE_GET_REQ;
- else
- code = CMD_CPU_CLOCK_RATE_SET_REQ;
-
- pr_debug("%s:%d CMD Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n",
- __func__, __LINE__, cmd.cluster_class,
- cmd.cluster_id, cmd.cpu_id, cmd.frequency);
-
- ret = mb86s7x_send_packet(code, &cmd, sizeof(cmd));
- if (ret < 0) {
- pr_err("%s:%d failed!\n", __func__, __LINE__);
- return;
- }
-
- pr_debug("%s:%d REP Cl_Class-%u CL_ID-%u CPU_ID-%u Freq-%llu}\n",
- __func__, __LINE__, cmd.cluster_class,
- cmd.cluster_id, cmd.cpu_id, cmd.frequency);
-
- *rate = cmd.frequency;
-}
-
-static unsigned long
-clc_recalc_rate(struct clk_hw *hw, unsigned long unused)
-{
- unsigned long rate;
-
- mhu_cluster_rate(hw, &rate, 1);
- return rate;
-}
-
-static long
-clc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
-{
- return rate;
-}
-
-static int
-clc_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long unused)
-{
- unsigned long res = rate;
-
- mhu_cluster_rate(hw, &res, 0);
-
- return (res == rate) ? 0 : -EINVAL;
-}
-
-static struct clk_ops clk_clc_ops = {
- .recalc_rate = clc_recalc_rate,
- .round_rate = clc_round_rate,
- .set_rate = clc_set_rate,
-};
-
-static struct clk_hw *mb86s7x_clclk_register(struct device *cpu_dev)
-{
- struct clk_init_data init;
- struct cl_clk *clc;
- int ret;
-
- clc = kzalloc(sizeof(*clc), GFP_KERNEL);
- if (!clc)
- return ERR_PTR(-ENOMEM);
-
- clc->hw.init = &init;
- clc->cluster = topology_physical_package_id(cpu_dev->id);
-
- init.name = dev_name(cpu_dev);
- init.ops = &clk_clc_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.num_parents = 0;
-
- ret = devm_clk_hw_register(cpu_dev, &clc->hw);
- if (ret)
- return ERR_PTR(ret);
- return &clc->hw;
-}
-
-static int mb86s7x_clclk_of_init(void)
-{
- int cpu, ret = -ENODEV;
- struct device_node *np;
- struct clk_hw *hw;
-
- np = of_find_compatible_node(NULL, NULL, "fujitsu,mb86s70-scb-1.0");
- if (!np || !of_device_is_available(np))
- goto exit;
-
- for_each_possible_cpu(cpu) {
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
- continue;
- }
-
- hw = mb86s7x_clclk_register(cpu_dev);
- if (IS_ERR(hw)) {
- pr_err("failed to register cpu%d clock\n", cpu);
- continue;
- }
- if (clk_hw_register_clkdev(hw, NULL, dev_name(cpu_dev))) {
- pr_err("failed to register cpu%d clock lookup\n", cpu);
- continue;
- }
- pr_debug("registered clk for %s\n", dev_name(cpu_dev));
- }
- ret = 0;
-
- platform_device_register_simple("arm-bL-cpufreq-dt", -1, NULL, 0);
-exit:
- of_node_put(np);
- return ret;
-}
-module_init(mb86s7x_clclk_of_init);
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
index b86dac851116..58428d0043fd 100644
--- a/drivers/clk/clk-moxart.c
+++ b/drivers/clk/clk-moxart.c
@@ -18,7 +18,7 @@
static void __init moxart_of_pll_clk_init(struct device_node *node)
{
- static void __iomem *base;
+ void __iomem *base;
struct clk_hw *hw;
struct clk *ref_clk;
unsigned int mul;
@@ -30,7 +30,7 @@ static void __init moxart_of_pll_clk_init(struct device_node *node)
base = of_iomap(node, 0);
if (!base) {
- pr_err("%s: of_iomap failed\n", node->full_name);
+ pr_err("%pOF: of_iomap failed\n", node);
return;
}
@@ -39,13 +39,13 @@ static void __init moxart_of_pll_clk_init(struct device_node *node)
ref_clk = of_clk_get(node, 0);
if (IS_ERR(ref_clk)) {
- pr_err("%s: of_clk_get failed\n", node->full_name);
+ pr_err("%pOF: of_clk_get failed\n", node);
return;
}
hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
if (IS_ERR(hw)) {
- pr_err("%s: failed to register clock\n", node->full_name);
+ pr_err("%pOF: failed to register clock\n", node);
return;
}
@@ -57,7 +57,7 @@ CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
static void __init moxart_of_apb_clk_init(struct device_node *node)
{
- static void __iomem *base;
+ void __iomem *base;
struct clk_hw *hw;
struct clk *pll_clk;
unsigned int div, val;
@@ -70,7 +70,7 @@ static void __init moxart_of_apb_clk_init(struct device_node *node)
base = of_iomap(node, 0);
if (!base) {
- pr_err("%s: of_iomap failed\n", node->full_name);
+ pr_err("%pOF: of_iomap failed\n", node);
return;
}
@@ -83,13 +83,13 @@ static void __init moxart_of_apb_clk_init(struct device_node *node)
pll_clk = of_clk_get(node, 0);
if (IS_ERR(pll_clk)) {
- pr_err("%s: of_clk_get failed\n", node->full_name);
+ pr_err("%pOF: of_clk_get failed\n", node);
return;
}
hw = clk_hw_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
if (IS_ERR(hw)) {
- pr_err("%s: failed to register clock\n", node->full_name);
+ pr_err("%pOF: failed to register clock\n", node);
return;
}
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index f3931e38fac0..b0ea753b8709 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -537,6 +538,17 @@ static const struct clockgen_chipinfo chipinfo[] = {
.flags = CG_PLL_8BIT,
},
{
+ .compat = "fsl,ls1088a-clockgen",
+ .cmux_groups = {
+ &clockgen2_cmux_cga12
+ },
+ .cmux_to_group = {
+ 0, 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+ },
+ {
.compat = "fsl,ls1012a-clockgen",
.cmux_groups = {
&ls1012a_cmux
@@ -1113,6 +1125,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
struct clk *clk;
+ int ret;
snprintf(pll->div[i].name, sizeof(pll->div[i].name),
"cg-pll%d-div%d", idx, i + 1);
@@ -1126,6 +1139,11 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
}
pll->div[i].clk = clk;
+ ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
+ if (ret != 0)
+			pr_err("%s: %s: register to lookup table failed %d\n",
+			       __func__, pll->div[i].name, ret);
+
}
}
@@ -1348,8 +1366,7 @@ static void __init clockgen_init(struct device_node *np)
}
if (i == ARRAY_SIZE(chipinfo)) {
- pr_err("%s: unknown clockgen node %s\n", __func__,
- np->full_name);
+ pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
goto err;
}
clockgen.info = chipinfo[i];
@@ -1362,8 +1379,8 @@ static void __init clockgen_init(struct device_node *np)
if (guts) {
clockgen.guts = of_iomap(guts, 0);
if (!clockgen.guts) {
- pr_err("%s: Couldn't map %s regs\n", __func__,
- guts->full_name);
+ pr_err("%s: Couldn't map %pOF regs\n", __func__,
+ guts);
}
}
@@ -1398,6 +1415,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
/* Legacy nodes */
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 2492442eea77..20d90769cced 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -519,6 +519,11 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
SI5351_CLK_INTEGER_MODE,
(hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
+ /* Do a pll soft reset on the affected pll */
+ si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET,
+ hwdata->num == 0 ? SI5351_PLL_RESET_A :
+ SI5351_PLL_RESET_B);
+
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw),
@@ -1091,13 +1096,6 @@ static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_POWERDOWN, 0);
- /*
- * Do a pll soft reset on both plls, needed in some cases to get
- * all outputs running.
- */
- si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET,
- SI5351_PLL_RESET_A | SI5351_PLL_RESET_B);
-
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
__func__, clk_hw_get_name(hw), (1 << rdiv),
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 68e2a4e499f1..96c6b6bc8f0e 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -1541,8 +1541,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock);
if (IS_ERR(clks[idx])) {
- pr_err("%s: Unable to register leaf clock %s\n",
- np->full_name, gd->name);
+ pr_err("%pOF: Unable to register leaf clock %s\n",
+ np, gd->name);
goto fail;
}
}
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
new file mode 100644
index 000000000000..a94c3f56c590
--- /dev/null
+++ b/drivers/clk/clk-stm32h7.c
@@ -0,0 +1,1410 @@
+/*
+ * Copyright (C) Gabriel Fernandez 2017
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com>
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/stm32h7-clks.h>
+
+/* Reset Clock Control Registers */
+#define RCC_CR 0x00
+#define RCC_CFGR 0x10
+#define RCC_D1CFGR 0x18
+#define RCC_D2CFGR 0x1C
+#define RCC_D3CFGR 0x20
+#define RCC_PLLCKSELR 0x28
+#define RCC_PLLCFGR 0x2C
+#define RCC_PLL1DIVR 0x30
+#define RCC_PLL1FRACR 0x34
+#define RCC_PLL2DIVR 0x38
+#define RCC_PLL2FRACR 0x3C
+#define RCC_PLL3DIVR 0x40
+#define RCC_PLL3FRACR 0x44
+#define RCC_D1CCIPR 0x4C
+#define RCC_D2CCIP1R 0x50
+#define RCC_D2CCIP2R 0x54
+#define RCC_D3CCIPR 0x58
+#define RCC_BDCR 0x70
+#define RCC_CSR 0x74
+#define RCC_AHB3ENR 0xD4
+#define RCC_AHB1ENR 0xD8
+#define RCC_AHB2ENR 0xDC
+#define RCC_AHB4ENR 0xE0
+#define RCC_APB3ENR 0xE4
+#define RCC_APB1LENR 0xE8
+#define RCC_APB1HENR 0xEC
+#define RCC_APB2ENR 0xF0
+#define RCC_APB4ENR 0xF4
+
+static DEFINE_SPINLOCK(stm32rcc_lock);
+
+static void __iomem *base;
+static struct clk_hw **hws;
+
+/* System clock parent */
+static const char * const sys_src[] = {
+ "hsi_ck", "csi_ck", "hse_ck", "pll1_p" };
+
+static const char * const tracein_src[] = {
+ "hsi_ck", "csi_ck", "hse_ck", "pll1_r" };
+
+static const char * const per_src[] = {
+ "hsi_ker", "csi_ker", "hse_ck", "disabled" };
+
+static const char * const pll_src[] = {
+ "hsi_ck", "csi_ck", "hse_ck", "no clock" };
+
+static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };
+
+static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };
+
+static const char * const qspi_src[] = {
+ "hclk", "pll1_q", "pll2_r", "per_ck" };
+
+static const char * const fmc_src[] = {
+ "hclk", "pll1_q", "pll2_r", "per_ck" };
+
+/* Kernel clock parent */
+static const char * const swp_src[] = { "pclk1", "hsi_ker" };
+
+static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };
+
+static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };
+
+static const char * const spdifrx_src[] = {
+ "pll1_q", "pll2_r", "pll3_r", "hsi_ker" };
+
+static const char *spi_src1[5] = {
+ "pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
+
+static const char * const spi_src2[] = {
+ "pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
+
+static const char * const spi_src3[] = {
+ "pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
+
+static const char * const lptim_src1[] = {
+ "pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
+
+static const char * const lptim_src2[] = {
+ "pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
+
+static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };
+
+static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };
+
+/* i2c 1,2,3 src */
+static const char * const i2c_src1[] = {
+ "pclk1", "pll3_r", "hsi_ker", "csi_ker" };
+
+static const char * const i2c_src2[] = {
+ "pclk4", "pll3_r", "hsi_ker", "csi_ker" };
+
+static const char * const rng_src[] = {
+ "rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };
+
+/* usart 1,6 src */
+static const char * const usart_src1[] = {
+ "pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
+
+/* usart 2,3,4,5,7,8 src */
+static const char * const usart_src2[] = {
+ "pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
+
+static const char *sai_src[5] = {
+ "pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
+
+static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };
+
+/* lptim 2,3,4,5 src */
+static const char * const lpuart1_src[] = {
+ "pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };
+
+static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };
+
+/* RTC clock parent */
+static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };
+
+/* Micro-controller output clock parent */
+static const char * const mco_src1[] = {
+ "hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };
+
+static const char * const mco_src2[] = {
+ "sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };
+
+/* LCD clock */
+static const char * const ltdc_src[] = {"pll3_r"};
+
+/* Gate clock with ready bit and backup domain management */
+struct stm32_ready_gate {
+ struct clk_gate gate;
+ u8 bit_rdy;
+};
+
+#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
+ gate)
+
+#define RGATE_TIMEOUT 10000
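+
+/*
+ * Worst-case wait before the ops below give up: RGATE_TIMEOUT
+ * iterations of udelay(100), i.e. about 10000 * 100 us = 1 s.
+ */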
+
+static int ready_gate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
+ int bit_status;
+ unsigned int timeout = RGATE_TIMEOUT;
+
+ if (clk_gate_ops.is_enabled(hw))
+ return 0;
+
+ clk_gate_ops.enable(hw);
+
+	/* We can't use readl_poll_timeout() because we could be blocked
+	 * if someone enables this clock before the clocksource changes.
+	 * Only the jiffies counter is available. Jiffies are incremented
+	 * by interrupts, and the enable op does not allow being
+	 * interrupted.
+	 */
+ do {
+ bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
+
+ if (bit_status)
+ udelay(100);
+
+ } while (bit_status && --timeout);
+
+ return bit_status;
+}
+
+static void ready_gate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
+ int bit_status;
+ unsigned int timeout = RGATE_TIMEOUT;
+
+ if (!clk_gate_ops.is_enabled(hw))
+ return;
+
+ clk_gate_ops.disable(hw);
+
+ do {
+ bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
+
+ if (bit_status)
+ udelay(100);
+
+ } while (bit_status && --timeout);
+}
+
+static const struct clk_ops ready_gate_clk_ops = {
+ .enable = ready_gate_clk_enable,
+ .disable = ready_gate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static struct clk_hw *clk_register_ready_gate(struct device *dev,
+ const char *name, const char *parent_name,
+ void __iomem *reg, u8 bit_idx, u8 bit_rdy,
+ unsigned long flags, spinlock_t *lock)
+{
+ struct stm32_ready_gate *rgate;
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
+ int ret;
+
+ rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
+ if (!rgate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &ready_gate_clk_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ rgate->bit_rdy = bit_rdy;
+ rgate->gate.lock = lock;
+ rgate->gate.reg = reg;
+ rgate->gate.bit_idx = bit_idx;
+ rgate->gate.hw.init = &init;
+
+ hw = &rgate->gate.hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(rgate);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+struct gate_cfg {
+ u32 offset;
+ u8 bit_idx;
+};
+
+struct muxdiv_cfg {
+ u32 offset;
+ u8 shift;
+ u8 width;
+};
+
+struct composite_clk_cfg {
+ struct gate_cfg *gate;
+ struct muxdiv_cfg *mux;
+ struct muxdiv_cfg *div;
+ const char *name;
+ const char * const *parent_name;
+ int num_parents;
+ u32 flags;
+};
+
+struct composite_clk_gcfg_t {
+ u8 flags;
+ const struct clk_ops *ops;
+};
+
+/*
+ * General config definition of a composite clock (only clock divider for rate)
+ */
+struct composite_clk_gcfg {
+ struct composite_clk_gcfg_t *mux;
+ struct composite_clk_gcfg_t *div;
+ struct composite_clk_gcfg_t *gate;
+};
+
+#define M_CFG_MUX(_mux_ops, _mux_flags)\
+ .mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}
+
+#define M_CFG_DIV(_rate_ops, _rate_flags)\
+ .div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}
+
+#define M_CFG_GATE(_gate_ops, _gate_flags)\
+ .gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
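+
+/*
+ * Illustrative only: a config composed with the helpers above, as done
+ * for odf_clk_gcfg and kernel_clk_cfg later in this file. A NULL ops
+ * pointer makes get_cfg_composite_div() fall back to the generic
+ * clk_mux_ops / clk_divider_ops / clk_gate_ops.
+ *
+ *	static struct composite_clk_gcfg example_gcfg = {
+ *		M_CFG_DIV(NULL, 0),
+ *		M_CFG_GATE(NULL, 0),
+ *	};
+ */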
+
+static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
+ u32 flags, spinlock_t *lock)
+{
+ struct clk_mux *mux;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = (1 << width) - 1;
+ mux->flags = flags;
+ mux->lock = lock;
+
+ return mux;
+}
+
+static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
+ u32 flags, spinlock_t *lock)
+{
+ struct clk_divider *div;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = flags;
+ div->lock = lock;
+
+ return div;
+}
+
+static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
+ spinlock_t *lock)
+{
+ struct clk_gate *gate;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = flags;
+ gate->lock = lock;
+
+ return gate;
+}
+
+struct composite_cfg {
+ struct clk_hw *mux_hw;
+ struct clk_hw *div_hw;
+ struct clk_hw *gate_hw;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *div_ops;
+ const struct clk_ops *gate_ops;
+};
+
+static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
+ const struct composite_clk_cfg *cfg,
+ struct composite_cfg *composite, spinlock_t *lock)
+{
+ struct clk_mux *mux = NULL;
+ struct clk_divider *div = NULL;
+ struct clk_gate *gate = NULL;
+ const struct clk_ops *mux_ops, *div_ops, *gate_ops;
+ struct clk_hw *mux_hw;
+ struct clk_hw *div_hw;
+ struct clk_hw *gate_hw;
+
+ mux_ops = div_ops = gate_ops = NULL;
+ mux_hw = div_hw = gate_hw = NULL;
+
+	if (gcfg->mux && cfg->mux) {
+ mux = _get_cmux(base + cfg->mux->offset,
+ cfg->mux->shift,
+ cfg->mux->width,
+ gcfg->mux->flags, lock);
+
+ if (!IS_ERR(mux)) {
+ mux_hw = &mux->hw;
+ mux_ops = gcfg->mux->ops ?
+ gcfg->mux->ops : &clk_mux_ops;
+ }
+ }
+
+ if (gcfg->div && cfg->div) {
+ div = _get_cdiv(base + cfg->div->offset,
+ cfg->div->shift,
+ cfg->div->width,
+ gcfg->div->flags, lock);
+
+ if (!IS_ERR(div)) {
+ div_hw = &div->hw;
+ div_ops = gcfg->div->ops ?
+ gcfg->div->ops : &clk_divider_ops;
+ }
+ }
+
+	if (gcfg->gate && cfg->gate) {
+ gate = _get_cgate(base + cfg->gate->offset,
+ cfg->gate->bit_idx,
+ gcfg->gate->flags, lock);
+
+ if (!IS_ERR(gate)) {
+ gate_hw = &gate->hw;
+ gate_ops = gcfg->gate->ops ?
+ gcfg->gate->ops : &clk_gate_ops;
+ }
+ }
+
+ composite->mux_hw = mux_hw;
+ composite->mux_ops = mux_ops;
+
+ composite->div_hw = div_hw;
+ composite->div_ops = div_ops;
+
+ composite->gate_hw = gate_hw;
+ composite->gate_ops = gate_ops;
+}
+
+/* Kernel Timer */
+struct timer_ker {
+ u8 dppre_shift;
+ struct clk_hw hw;
+ spinlock_t *lock;
+};
+
+#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)
+
+static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct timer_ker *clk_elem = to_timer_ker(hw);
+ u32 timpre;
+ u32 dppre_shift = clk_elem->dppre_shift;
+ u32 prescaler;
+ u32 mul;
+
+ timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;
+
+	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x07;
+
+ mul = 2;
+
+	if (prescaler < 4)
+		mul = 1;
+	else if (timpre && prescaler > 4)
+		mul = 4;
+
+ return parent_rate * mul;
+}
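+
+/*
+ * Example derived from the logic above: with TIMPRE clear and a D2PPRE
+ * field of 5 (pclk = hclk / 4), the timer kernel clock is 2 * pclk;
+ * with a D2PPRE field below 4 (pclk undivided), it is pclk itself.
+ */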
+
+static const struct clk_ops timer_ker_ops = {
+ .recalc_rate = timer_ker_recalc_rate,
+};
+
+static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
+ const char *name, const char *parent_name,
+ unsigned long flags,
+ u8 dppre_shift,
+ spinlock_t *lock)
+{
+ struct timer_ker *element;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int err;
+
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (!element)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &timer_ker_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ element->hw.init = &init;
+ element->lock = lock;
+ element->dppre_shift = dppre_shift;
+
+ hw = &element->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err) {
+ kfree(element);
+ return ERR_PTR(err);
+ }
+
+ return hw;
+}
+
+static const struct clk_div_table d1cpre_div_table[] = {
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
+ { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
+ { 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
+ { 12, 64 }, { 13, 128 }, { 14, 256 },
+ { 15, 512 },
+ { 0 },
+};
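+
+/*
+ * In d1cpre_div_table, field values 0-7 leave the clock undivided and
+ * 8-15 select the power-of-two dividers above; ppre_div_table below
+ * follows the same pattern with a 3-bit field.
+ */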
+
+static const struct clk_div_table ppre_div_table[] = {
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
+ { 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
+ { 0 },
+};
+
+static void register_core_and_bus_clocks(void)
+{
+ /* CORE AND BUS */
+ hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
+ "sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
+ d1cpre_div_table, &stm32rcc_lock);
+
+ hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
+ CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
+ d1cpre_div_table, &stm32rcc_lock);
+
+ /* D1 DOMAIN */
+ /* * CPU Systick */
+ hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
+ "d1cpre", 0, 1, 8);
+
+ /* * APB3 peripheral */
+ hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
+ base + RCC_D1CFGR, 4, 3, 0,
+ ppre_div_table, &stm32rcc_lock);
+
+ /* D2 DOMAIN */
+ /* * APB1 peripheral */
+ hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
+ base + RCC_D2CFGR, 4, 3, 0,
+ ppre_div_table, &stm32rcc_lock);
+
+ /* Timers prescaler clocks */
+ clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
+ 4, &stm32rcc_lock);
+
+ /* * APB2 peripheral */
+ hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
+ base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
+ &stm32rcc_lock);
+
+ clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
+ &stm32rcc_lock);
+
+ /* D3 DOMAIN */
+ /* * APB4 peripheral */
+ hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
+ base + RCC_D3CFGR, 4, 3, 0,
+ ppre_div_table, &stm32rcc_lock);
+}
+
+/* MUX clock configuration */
+struct stm32_mux_clk {
+ const char *name;
+ const char * const *parents;
+ u8 num_parents;
+ u32 offset;
+ u8 shift;
+ u8 width;
+ u32 flags;
+};
+
+#define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
+{\
+ .name = _name,\
+ .parents = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .offset = _mux_offset,\
+ .shift = _mux_shift,\
+ .width = _mux_width,\
+ .flags = _flags,\
+}
+
+#define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
+ M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
+
+static const struct stm32_mux_clk stm32_mclk[] __initconst = {
+ M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
+ M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
+ M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
+ M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
+};
+
+/* Oscillator clock configuration */
+struct stm32_osc_clk {
+ const char *name;
+ const char *parent;
+ u32 gate_offset;
+ u8 bit_idx;
+ u8 bit_rdy;
+ u32 flags;
+};
+
+#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
+{\
+ .name = _name,\
+ .parent = _parent,\
+ .gate_offset = _gate_offset,\
+ .bit_idx = _bit_idx,\
+ .bit_rdy = _bit_rdy,\
+ .flags = _flags,\
+}
+
+#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
+ OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
+
+static const struct stm32_osc_clk stm32_oclk[] __initconst = {
+ OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
+ OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
+ OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
+ OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
+ OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
+ OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
+};
+
+/* PLL configuration */
+struct st32h7_pll_cfg {
+ u8 bit_idx;
+ u32 offset_divr;
+ u8 bit_frac_en;
+ u32 offset_frac;
+ u8 divm;
+};
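+
+/*
+ * Note: @divm above holds the bit position of the PLL's DIVM field in
+ * RCC_PLLCKSELR (4, 12 or 20), not a division value; it is used as a
+ * shift in clk_register_stm32_pll().
+ */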
+
+struct stm32_pll_data {
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ const struct st32h7_pll_cfg *cfg;
+};
+
+static const struct st32h7_pll_cfg stm32h7_pll1 = {
+ .bit_idx = 24,
+ .offset_divr = RCC_PLL1DIVR,
+ .bit_frac_en = 0,
+ .offset_frac = RCC_PLL1FRACR,
+ .divm = 4,
+};
+
+static const struct st32h7_pll_cfg stm32h7_pll2 = {
+ .bit_idx = 26,
+ .offset_divr = RCC_PLL2DIVR,
+ .bit_frac_en = 4,
+ .offset_frac = RCC_PLL2FRACR,
+ .divm = 12,
+};
+
+static const struct st32h7_pll_cfg stm32h7_pll3 = {
+ .bit_idx = 28,
+ .offset_divr = RCC_PLL3DIVR,
+ .bit_frac_en = 8,
+ .offset_frac = RCC_PLL3FRACR,
+ .divm = 20,
+};
+
+static const struct stm32_pll_data stm32_pll[] = {
+ { "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
+ { "vco2", "pllsrc", 0, &stm32h7_pll2 },
+ { "vco3", "pllsrc", 0, &stm32h7_pll3 },
+};
+
+struct stm32_fractional_divider {
+ void __iomem *mreg;
+ u8 mshift;
+ u8 mwidth;
+ u32 mmask;
+
+ void __iomem *nreg;
+ u8 nshift;
+ u8 nwidth;
+
+ void __iomem *freg_status;
+ u8 freg_bit;
+ void __iomem *freg_value;
+ u8 fshift;
+ u8 fwidth;
+
+ u8 flags;
+ struct clk_hw hw;
+ spinlock_t *lock;
+};
+
+struct stm32_pll_obj {
+ spinlock_t *lock;
+ struct stm32_fractional_divider div;
+ struct stm32_ready_gate rgate;
+ struct clk_hw hw;
+};
+
+#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
+
+static int pll_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
+
+ __clk_hw_set_clk(_hw, hw);
+
+ return ready_gate_clk_ops.is_enabled(_hw);
+}
+
+static int pll_enable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
+
+ __clk_hw_set_clk(_hw, hw);
+
+ return ready_gate_clk_ops.enable(_hw);
+}
+
+static void pll_disable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
+
+ __clk_hw_set_clk(_hw, hw);
+
+ ready_gate_clk_ops.disable(_hw);
+}
+
+static int pll_frac_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct stm32_fractional_divider *fd = &clk_elem->div;
+
+ return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
+}
+
+static unsigned long pll_read_frac(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct stm32_fractional_divider *fd = &clk_elem->div;
+
+ return (readl(fd->freg_value) >> fd->fshift) &
+ GENMASK(fd->fwidth - 1, 0);
+}
+
+static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct stm32_fractional_divider *fd = &clk_elem->div;
+ unsigned long m, n;
+ u32 val, mask;
+ u64 rate, rate1 = 0;
+
+ val = readl(fd->mreg);
+ mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
+ m = (val & mask) >> fd->mshift;
+
+ val = readl(fd->nreg);
+ mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
+ n = ((val & mask) >> fd->nshift) + 1;
+
+ if (!n || !m)
+ return parent_rate;
+
+ rate = (u64)parent_rate * n;
+ do_div(rate, m);
+
+ if (pll_frac_is_enabled(hw)) {
+ val = pll_read_frac(hw);
+ rate1 = (u64)parent_rate * (u64)val;
+ do_div(rate1, (m * 8191));
+ }
+
+ return rate + rate1;
+}
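+
+/*
+ * Worked example (illustrative values): with DIVM = 4, a DIVN field of
+ * 59 (n = 60) and the fractional part disabled, a 64 MHz source gives
+ * 64 MHz * 60 / 4 = 960 MHz at the VCO output.
+ */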
+
+static const struct clk_ops pll_ops = {
+ .enable = pll_enable,
+ .disable = pll_disable,
+ .is_enabled = pll_is_enabled,
+ .recalc_rate = pll_fd_recalc_rate,
+};
+
+static struct clk_hw *clk_register_stm32_pll(struct device *dev,
+ const char *name,
+ const char *parent,
+ unsigned long flags,
+ const struct st32h7_pll_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct stm32_pll_obj *pll;
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
+ int ret;
+ struct stm32_fractional_divider *div = NULL;
+ struct stm32_ready_gate *rgate;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.flags = flags;
+ init.parent_names = &parent;
+ init.num_parents = 1;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+ rgate = &pll->rgate;
+
+ rgate->bit_rdy = cfg->bit_idx + 1;
+ rgate->gate.lock = lock;
+ rgate->gate.reg = base + RCC_CR;
+ rgate->gate.bit_idx = cfg->bit_idx;
+
+ div = &pll->div;
+ div->flags = 0;
+ div->mreg = base + RCC_PLLCKSELR;
+ div->mshift = cfg->divm;
+ div->mwidth = 6;
+ div->nreg = base + cfg->offset_divr;
+ div->nshift = 0;
+ div->nwidth = 9;
+
+ div->freg_status = base + RCC_PLLCFGR;
+ div->freg_bit = cfg->bit_frac_en;
+ div->freg_value = base + cfg->offset_frac;
+ div->fshift = 3;
+ div->fwidth = 13;
+
+ div->lock = lock;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(pll);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+/* ODF CLOCKS */
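+/*
+ * The ops below briefly disable the parent PLL around divider and gate
+ * register writes; the assumption is that the PLL output dividers may
+ * only be reprogrammed while the PLL is stopped.
+ */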
+static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hw *hwp;
+ int pll_status;
+ int ret;
+
+ hwp = clk_hw_get_parent(hw);
+
+ pll_status = pll_is_enabled(hwp);
+
+ if (pll_status)
+ pll_disable(hwp);
+
+ ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
+
+ if (pll_status)
+ pll_enable(hwp);
+
+ return ret;
+}
+
+static const struct clk_ops odf_divider_ops = {
+ .recalc_rate = odf_divider_recalc_rate,
+ .round_rate = odf_divider_round_rate,
+ .set_rate = odf_divider_set_rate,
+};
+
+static int odf_gate_enable(struct clk_hw *hw)
+{
+ struct clk_hw *hwp;
+ int pll_status;
+ int ret;
+
+ if (clk_gate_ops.is_enabled(hw))
+ return 0;
+
+ hwp = clk_hw_get_parent(hw);
+
+ pll_status = pll_is_enabled(hwp);
+
+ if (pll_status)
+ pll_disable(hwp);
+
+ ret = clk_gate_ops.enable(hw);
+
+ if (pll_status)
+ pll_enable(hwp);
+
+ return ret;
+}
+
+static void odf_gate_disable(struct clk_hw *hw)
+{
+ struct clk_hw *hwp;
+ int pll_status;
+
+ if (!clk_gate_ops.is_enabled(hw))
+ return;
+
+ hwp = clk_hw_get_parent(hw);
+
+ pll_status = pll_is_enabled(hwp);
+
+ if (pll_status)
+ pll_disable(hwp);
+
+ clk_gate_ops.disable(hw);
+
+ if (pll_status)
+ pll_enable(hwp);
+}
+
+static const struct clk_ops odf_gate_ops = {
+ .enable = odf_gate_enable,
+ .disable = odf_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static struct composite_clk_gcfg odf_clk_gcfg = {
+ M_CFG_DIV(&odf_divider_ops, 0),
+ M_CFG_GATE(&odf_gate_ops, 0),
+};
+
+#define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
+ _rate_shift, _rate_width, _flags)\
+{\
+ .mux = NULL,\
+ .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
+ .gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
+ .name = _name,\
+ .parent_name = &(const char *) {_parent},\
+ .num_parents = 1,\
+ .flags = _flags,\
+}
+
+#define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
+ _rate_shift, _rate_width)\
+M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
+ _rate_shift, _rate_width, 0)\
+
+static const struct composite_clk_cfg stm32_odf[3][3] = {
+ {
+ M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
+ CLK_IGNORE_UNUSED),
+ M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
+ CLK_IGNORE_UNUSED),
+ M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
+ CLK_IGNORE_UNUSED),
+ },
+
+ {
+ M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
+ M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
+ M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
+ },
+ {
+ M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
+ M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
+ M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
+ }
+};
+
+/* PERIF CLOCKS */
+struct pclk_t {
+ u32 gate_offset;
+ u8 bit_idx;
+ const char *name;
+ const char *parent;
+ u32 flags;
+};
+
+#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
+{\
+ .gate_offset = _gate_offset,\
+ .bit_idx = _bit_idx,\
+ .name = _name,\
+ .parent = _parent,\
+ .flags = _flags,\
+}
+
+#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
+ PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
+
+static const struct pclk_t pclk[] = {
+ PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
+ PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
+ PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
+ PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
+ PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
+ PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
+ PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
+ PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
+ PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
+ PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
+ PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
+ PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
+ PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
+ PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
+ PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
+ PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
+ PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
+ PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
+ PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
+ PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
+ PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
+ PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
+ PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
+};
+
+/* KERNEL CLOCKS */
+#define KER_CLKF(_gate_offset, _bit_idx,\
+ _mux_offset, _mux_shift, _mux_width,\
+ _name, _parent_name,\
+ _flags) \
+{ \
+ .gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
+ .mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
+ .name = _name, \
+ .parent_name = _parent_name, \
+ .num_parents = ARRAY_SIZE(_parent_name),\
+ .flags = _flags,\
+}
+
+#define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
+ _name, _parent_name) \
+KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
+ _name, _parent_name, 0)\
+
+#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
+ _name, _parent_name,\
+ _flags) \
+{ \
+ .gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
+ .mux = NULL,\
+ .name = _name, \
+ .parent_name = _parent_name, \
+ .num_parents = 1,\
+ .flags = _flags,\
+}
+
+static const struct composite_clk_cfg kclk[] = {
+ KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
+ KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
+ CLK_IGNORE_UNUSED),
+ KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
+ CLK_IGNORE_UNUSED),
+ KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
+ KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
+ KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
+ KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
+ KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
+ KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
+ KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
+ CLK_SET_RATE_PARENT),
+ KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
+ KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
+ KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
+ KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
+ KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
+ KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
+ KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
+ KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
+ KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
+ KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
+ KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
+ KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
+ KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
+ KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
+ KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
+ KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
+ KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
+ KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
+ KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
+ KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
+ KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
+ KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
+ KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
+ KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
+ KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
+ KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
+ KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
+ KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
+ KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
+ KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
+ KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
+ KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
+ KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
+ KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
+ KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
+ KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
+};
+
+static struct composite_clk_gcfg kernel_clk_cfg = {
+ M_CFG_MUX(NULL, 0),
+ M_CFG_GATE(NULL, 0),
+};
+
+/* RTC clock */
+/*
+ * RTC & LSE registers are protected against parasitic write access.
+ * PWR_CR_DBP bit must be set to enable write access to RTC registers.
+ */
+/* STM32_PWR_CR */
+#define PWR_CR 0x00
+/* STM32_PWR_CR bit field */
+#define PWR_CR_DBP BIT(8)
+
+static struct composite_clk_gcfg rtc_clk_cfg = {
+ M_CFG_MUX(NULL, 0),
+ M_CFG_GATE(NULL, 0),
+};
+
+static const struct composite_clk_cfg rtc_clk =
+ KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
+
+/* Micro-controller output clock */
+static struct composite_clk_gcfg mco_clk_cfg = {
+ M_CFG_MUX(NULL, 0),
+ M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+};
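+
+/*
+ * With CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO the MCO
+ * prescaler field is used directly as the divisor, a zero field being
+ * treated as divide-by-one.
+ */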
+
+#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\
+ _rate_offset, _rate_shift, _rate_width,\
+ _flags)\
+{\
+ .mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
+ .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
+ .gate = NULL,\
+ .name = _name,\
+ .parent_name = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+}
+
+static const struct composite_clk_cfg mco_clk[] = {
+ M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
+ M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
+};
+
+static void __init stm32h7_rcc_init(struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct composite_cfg c_cfg;
+ int n;
+ const char *hse_clk, *lse_clk, *i2s_clk;
+ struct regmap *pdrm;
+
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
+ GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->num = STM32H7_MAX_CLKS;
+
+ hws = clk_data->hws;
+
+ for (n = 0; n < STM32H7_MAX_CLKS; n++)
+ hws[n] = ERR_PTR(-ENOENT);
+
+	/* Get the RCC base address from DT */
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: unable to map resource", np->name);
+ goto err_free_clks;
+ }
+
+ pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(pdrm))
+ pr_warn("%s: Unable to get syscfg\n", __func__);
+ else
+		/* Disable backup domain write protection unconditionally;
+		 * it is never re-enabled. Needed by the LSE & RTC clocks.
+		 */
+ regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
+
+	/* Get parent clock names from DT */
+ hse_clk = of_clk_get_parent_name(np, 0);
+ lse_clk = of_clk_get_parent_name(np, 1);
+ i2s_clk = of_clk_get_parent_name(np, 2);
+
+ sai_src[3] = i2s_clk;
+ spi_src1[3] = i2s_clk;
+
+ /* Register Internal oscillators */
+ clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
+ clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
+ clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
+ clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000);
+
+	/* This clock comes from outside the SoC; its frequency is unknown */
+ hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
+ 0, 0);
+
+ hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
+ base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
+ &stm32rcc_lock);
+
+ hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
+ base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ &stm32rcc_lock);
+
+ /* Mux system clocks */
+ for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
+ hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
+ stm32_mclk[n].name,
+ stm32_mclk[n].parents,
+ stm32_mclk[n].num_parents,
+ stm32_mclk[n].flags,
+ stm32_mclk[n].offset + base,
+ stm32_mclk[n].shift,
+ stm32_mclk[n].width,
+ 0,
+ &stm32rcc_lock);
+
+ register_core_and_bus_clocks();
+
+	/* Oscillator clocks */
+ for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
+ hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
+ stm32_oclk[n].name,
+ stm32_oclk[n].parent,
+ stm32_oclk[n].gate_offset + base,
+ stm32_oclk[n].bit_idx,
+ stm32_oclk[n].bit_rdy,
+ stm32_oclk[n].flags,
+ &stm32rcc_lock);
+
+ hws[HSE_CK] = clk_register_ready_gate(NULL,
+ "hse_ck",
+ hse_clk,
+ RCC_CR + base,
+ 16, 17,
+ 0,
+ &stm32rcc_lock);
+
+ hws[LSE_CK] = clk_register_ready_gate(NULL,
+ "lse_ck",
+ lse_clk,
+ RCC_BDCR + base,
+ 0, 1,
+ 0,
+ &stm32rcc_lock);
+
+ hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
+ "csi_ker_div122", "csi_ker", 0, 1, 122);
+
+ /* PLLs */
+ for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
+ int odf;
+
+ /* Register the VCO */
+ clk_register_stm32_pll(NULL, stm32_pll[n].name,
+ stm32_pll[n].parent_name, stm32_pll[n].flags,
+ stm32_pll[n].cfg,
+ &stm32rcc_lock);
+
+ /* Register the 3 output dividers */
+ for (odf = 0; odf < 3; odf++) {
+ int idx = n * 3 + odf;
+
+ get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
+ &c_cfg, &stm32rcc_lock);
+
+ hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
+ stm32_odf[n][odf].name,
+ stm32_odf[n][odf].parent_name,
+ stm32_odf[n][odf].num_parents,
+ c_cfg.mux_hw, c_cfg.mux_ops,
+ c_cfg.div_hw, c_cfg.div_ops,
+ c_cfg.gate_hw, c_cfg.gate_ops,
+ stm32_odf[n][odf].flags);
+ }
+ }
+
+ /* Peripheral clocks */
+ for (n = 0; n < ARRAY_SIZE(pclk); n++)
+ hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
+ pclk[n].parent,
+ pclk[n].flags, base + pclk[n].gate_offset,
+ pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
+
+ /* Kernel clocks */
+ for (n = 0; n < ARRAY_SIZE(kclk); n++) {
+ get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
+ &stm32rcc_lock);
+
+ hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
+ kclk[n].name,
+ kclk[n].parent_name,
+ kclk[n].num_parents,
+ c_cfg.mux_hw, c_cfg.mux_ops,
+ c_cfg.div_hw, c_cfg.div_ops,
+ c_cfg.gate_hw, c_cfg.gate_ops,
+ kclk[n].flags);
+ }
+
+ /* RTC clock (default state is off) */
+ clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
+
+ get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
+
+ hws[RTC_CK] = clk_hw_register_composite(NULL,
+ rtc_clk.name,
+ rtc_clk.parent_name,
+ rtc_clk.num_parents,
+ c_cfg.mux_hw, c_cfg.mux_ops,
+ c_cfg.div_hw, c_cfg.div_ops,
+ c_cfg.gate_hw, c_cfg.gate_ops,
+ rtc_clk.flags);
+
+ /* Micro-controller clocks */
+ for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
+ get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
+ &stm32rcc_lock);
+
+ hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
+ mco_clk[n].name,
+ mco_clk[n].parent_name,
+ mco_clk[n].num_parents,
+ c_cfg.mux_hw, c_cfg.mux_ops,
+ c_cfg.div_hw, c_cfg.div_ops,
+ c_cfg.gate_hw, c_cfg.gate_ops,
+ mco_clk[n].flags);
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+
+ return;
+
+err_free_clks:
+ kfree(clk_data);
+}
+
+/* The RCC node is a clock and reset controller, and these
+ * functionalities are supported by different drivers that
+ * match the same compatible string.
+ */
+CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index ea7d552a2f2b..decffb3826ec 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -57,6 +57,7 @@
#define VC5_PRIM_SRC_SHDN 0x10
#define VC5_PRIM_SRC_SHDN_EN_XTAL BIT(7)
#define VC5_PRIM_SRC_SHDN_EN_CLKIN BIT(6)
+#define VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ BIT(3)
#define VC5_PRIM_SRC_SHDN_SP BIT(1)
#define VC5_PRIM_SRC_SHDN_EN_GBL_SHDN BIT(0)
@@ -122,12 +123,16 @@
/* flags to describe chip features */
/* chip has built-in oscilator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
+/* chip has PFD frequency doubler */
+#define VC5_HAS_PFD_FREQ_DBL BIT(1)
/* Supported IDT VC5 models. */
enum vc5_model {
IDT_VC5_5P49V5923,
+ IDT_VC5_5P49V5925,
IDT_VC5_5P49V5933,
IDT_VC5_5P49V5935,
+ IDT_VC6_5P49V6901,
};
/* Structure to describe features of a particular VC5 model */
@@ -157,6 +162,8 @@ struct vc5_driver_data {
struct clk *pin_clkin;
unsigned char clk_mux_ins;
struct clk_hw clk_mux;
+ struct clk_hw clk_mul;
+ struct clk_hw clk_pfd;
struct vc5_hw_data clk_pll;
struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM];
struct vc5_hw_data clk_out[VC5_MAX_CLK_OUT_NUM];
@@ -166,6 +173,14 @@ static const char * const vc5_mux_names[] = {
"mux"
};
+static const char * const vc5_dbl_names[] = {
+ "dbl"
+};
+
+static const char * const vc5_pfd_names[] = {
+ "pfd"
+};
+
static const char * const vc5_pll_names[] = {
"pll"
};
@@ -254,11 +269,64 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
}
-static unsigned long vc5_mux_recalc_rate(struct clk_hw *hw,
+static const struct clk_ops vc5_mux_ops = {
+ .set_parent = vc5_mux_set_parent,
+ .get_parent = vc5_mux_get_parent,
+};
+
+static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
- container_of(hw, struct vc5_driver_data, clk_mux);
+ container_of(hw, struct vc5_driver_data, clk_mul);
+ unsigned int premul;
+
+ regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
+ if (premul & VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ)
+ parent_rate *= 2;
+
+ return parent_rate;
+}
+
+static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
+ return rate;
+ else
+ return -EINVAL;
+}
+
+static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct vc5_driver_data *vc5 =
+ container_of(hw, struct vc5_driver_data, clk_mul);
+ u32 mask;
+
+ if ((parent_rate * 2) == rate)
+ mask = VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ;
+ else
+ mask = 0;
+
+ regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
+ VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
+ mask);
+
+ return 0;
+}
+
+static const struct clk_ops vc5_dbl_ops = {
+ .recalc_rate = vc5_dbl_recalc_rate,
+ .round_rate = vc5_dbl_round_rate,
+ .set_rate = vc5_dbl_set_rate,
+};
+
+static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vc5_driver_data *vc5 =
+ container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned int prediv, div;
regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
@@ -276,7 +344,7 @@ static unsigned long vc5_mux_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_mux_round_rate(struct clk_hw *hw, unsigned long rate,
+static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long idiv;
@@ -296,11 +364,11 @@ static long vc5_mux_round_rate(struct clk_hw *hw, unsigned long rate,
return *parent_rate / idiv;
}
-static int vc5_mux_set_rate(struct clk_hw *hw, unsigned long rate,
+static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct vc5_driver_data *vc5 =
- container_of(hw, struct vc5_driver_data, clk_mux);
+ container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned long idiv;
u8 div;
@@ -328,12 +396,10 @@ static int vc5_mux_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static const struct clk_ops vc5_mux_ops = {
- .set_parent = vc5_mux_set_parent,
- .get_parent = vc5_mux_get_parent,
- .recalc_rate = vc5_mux_recalc_rate,
- .round_rate = vc5_mux_round_rate,
- .set_rate = vc5_mux_set_rate,
+static const struct clk_ops vc5_pfd_ops = {
+ .recalc_rate = vc5_pfd_recalc_rate,
+ .round_rate = vc5_pfd_round_rate,
+ .set_rate = vc5_pfd_set_rate,
};
/*
@@ -426,6 +492,10 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
div_frc = (od_frc[0] << 22) | (od_frc[1] << 14) |
(od_frc[2] << 6) | (od_frc[3] >> 2);
+ /* Avoid division by zero if the output is not configured. */
+ if (div_int == 0 && div_frc == 0)
+ return 0;
+
/* The PLL divider has 12 integer bits and 30 fractional bits */
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
@@ -503,6 +573,25 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
+ const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
+ VC5_OUT_DIV_CONTROL_SEL_EXT |
+ VC5_OUT_DIV_CONTROL_EN_FOD;
+ unsigned int src;
+ int ret;
+
+ /*
+ * If the input mux is disabled, enable it first and
+ * select source from matching FOD.
+ */
+ regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ if ((src & mask) == 0) {
+ src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD;
+ ret = regmap_update_bits(vc5->regmap,
+ VC5_OUT_DIV_CONTROL(hwdata->num),
+ mask | VC5_OUT_DIV_CONTROL_RESET, src);
+ if (ret)
+ return ret;
+ }
/* Enable the clock buffer */
regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
@@ -516,7 +605,7 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw)
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
- /* Enable the clock buffer */
+ /* Disable the clock buffer */
regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0);
}
@@ -537,6 +626,9 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
src &= mask;
+ if (src == 0) /* Input mux set to DISABLED */
+ return 0;
+
if ((src & fodclkmask) == VC5_OUT_DIV_CONTROL_EN_FOD)
return 0;
@@ -595,7 +687,9 @@ static int vc5_map_index_to_output(const enum vc5_model model,
case IDT_VC5_5P49V5933:
return (n == 0) ? 0 : 3;
case IDT_VC5_5P49V5923:
+ case IDT_VC5_5P49V5925:
case IDT_VC5_5P49V5935:
+ case IDT_VC6_5P49V6901:
default:
return n;
}
@@ -672,12 +766,46 @@ static int vc5_probe(struct i2c_client *client,
goto err_clk;
}
+ if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) {
+ /* Register frequency doubler */
+ memset(&init, 0, sizeof(init));
+ init.name = vc5_dbl_names[0];
+ init.ops = &vc5_dbl_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = vc5_mux_names;
+ init.num_parents = 1;
+ vc5->clk_mul.init = &init;
+ ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul);
+ if (ret) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ goto err_clk;
+ }
+ }
+
+ /* Register PFD */
+ memset(&init, 0, sizeof(init));
+ init.name = vc5_pfd_names[0];
+ init.ops = &vc5_pfd_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL)
+ init.parent_names = vc5_dbl_names;
+ else
+ init.parent_names = vc5_mux_names;
+ init.num_parents = 1;
+ vc5->clk_pfd.init = &init;
+ ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd);
+ if (ret) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ goto err_clk;
+ }
+
/* Register PLL */
memset(&init, 0, sizeof(init));
init.name = vc5_pll_names[0];
init.ops = &vc5_pll_ops;
init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = vc5_mux_names;
+ init.parent_names = vc5_pfd_names;
init.num_parents = 1;
vc5->clk_pll.num = 0;
vc5->clk_pll.vc5 = vc5;
@@ -785,6 +913,13 @@ static const struct vc5_chip_info idt_5p49v5923_info = {
.flags = 0,
};
+static const struct vc5_chip_info idt_5p49v5925_info = {
+ .model = IDT_VC5_5P49V5925,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = 0,
+};
+
static const struct vc5_chip_info idt_5p49v5933_info = {
.model = IDT_VC5_5P49V5933,
.clk_fod_cnt = 2,
@@ -799,18 +934,29 @@ static const struct vc5_chip_info idt_5p49v5935_info = {
.flags = VC5_HAS_INTERNAL_XTAL,
};
+static const struct vc5_chip_info idt_5p49v6901_info = {
+ .model = IDT_VC6_5P49V6901,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_PFD_FREQ_DBL,
+};
+
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
+ { "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
{ "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
+ { "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5923", .data = &idt_5p49v5923_info },
+ { .compatible = "idt,5p49v5925", .data = &idt_5p49v5925_info },
{ .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info },
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
+ { .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index bc37030e38ba..4c75821a3933 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -192,7 +192,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
reg = of_iomap(np, 0);
if (reg == NULL) {
- pr_err("Unable to map CSR register for %s\n", np->full_name);
+ pr_err("Unable to map CSR register for %pOF\n", np);
return;
}
of_property_read_string(np, "clock-output-names", &clk_name);
@@ -409,12 +409,12 @@ static void xgene_pmdclk_init(struct device_node *np)
/* Parse the DTS register for resource */
rc = of_address_to_resource(np, 0, &res);
if (rc != 0) {
- pr_err("no DTS register for %s\n", np->full_name);
+ pr_err("no DTS register for %pOF\n", np);
return;
}
csr_reg = of_iomap(np, 0);
if (!csr_reg) {
- pr_err("Unable to map resource for %s\n", np->full_name);
+ pr_err("Unable to map resource for %pOF\n", np);
return;
}
of_property_read_string(np, "clock-output-names", &clk_name);
@@ -703,16 +703,14 @@ static void __init xgene_devclk_init(struct device_node *np)
rc = of_address_to_resource(np, i, &res);
if (rc != 0) {
if (i == 0) {
- pr_err("no DTS register for %s\n",
- np->full_name);
+ pr_err("no DTS register for %pOF\n", np);
return;
}
break;
}
map_res = of_iomap(np, i);
if (map_res == NULL) {
- pr_err("Unable to map resource %d for %s\n",
- i, np->full_name);
+ pr_err("Unable to map resource %d for %pOF\n", i, np);
goto err;
}
if (strcmp(res.name, "div-reg") == 0)
@@ -747,8 +745,7 @@ static void __init xgene_devclk_init(struct device_node *np)
pr_debug("Add %s clock\n", clk_name);
rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (rc != 0)
- pr_err("%s: could register provider clk %s\n", __func__,
- np->full_name);
+ pr_err("%s: could register provider clk %pOF\n", __func__, np);
return;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fc58c52a26b4..c8d83acda006 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3132,7 +3132,7 @@ int of_clk_add_provider(struct device_node *np,
mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
mutex_unlock(&of_clk_mutex);
- pr_debug("Added clock from %s\n", np->full_name);
+ pr_debug("Added clock from %pOF\n", np);
ret = of_clk_set_defaults(np, true);
if (ret < 0)
@@ -3167,7 +3167,7 @@ int of_clk_add_hw_provider(struct device_node *np,
mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
mutex_unlock(&of_clk_mutex);
- pr_debug("Added clk_hw provider from %s\n", np->full_name);
+ pr_debug("Added clk_hw provider from %pOF\n", np);
ret = of_clk_set_defaults(np, true);
if (ret < 0)
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index bb8a77a5985f..6b2f29df3f70 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -77,8 +77,8 @@ static struct clk *__of_clk_get_by_name(struct device_node *np,
break;
} else if (name && index >= 0) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_err("ERROR: could not get clock %s:%s(%i)\n",
- np->full_name, name ? name : "", index);
+ pr_err("ERROR: could not get clock %pOF:%s(%i)\n",
+ np, name ? name : "", index);
return clk;
}
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 4181b6808545..e786d717f75d 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -55,9 +55,9 @@ static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
};
static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
- { HI6220_WDT0_PCLK, "wdt0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
- { HI6220_WDT1_PCLK, "wdt1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
- { HI6220_WDT2_PCLK, "wdt2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
+ { HI6220_WDT0_PCLK, "wdt0_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
+ { HI6220_WDT1_PCLK, "wdt1_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
+ { HI6220_WDT2_PCLK, "wdt2_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
{ HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, },
{ HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, },
{ HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, },
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index 1e3c9ea5f9dc..7bcaf270db11 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -416,10 +416,10 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
- clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
- mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
- clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
- mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
+ clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux_flags("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+ mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel), CLK_SET_RATE_PARENT);
+ clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux_flags("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+ mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel), CLK_SET_RATE_PARENT);
clk[IMX5_CLK_TVE_EXT_SEL] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[IMX5_CLK_TVE_SEL] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1,
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 5fd4ddac1bf1..9642cdf0fb88 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -71,7 +71,7 @@ static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
-static struct clk_div_table clk_enet_ref_table[] = {
+static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
{ .val = 2, .div = 5, },
@@ -79,14 +79,14 @@ static struct clk_div_table clk_enet_ref_table[] = {
{ }
};
-static struct clk_div_table post_div_table[] = {
+static const struct clk_div_table post_div_table[] = {
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
-static struct clk_div_table video_div_table[] = {
+static const struct clk_div_table video_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 1, },
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index b5c96de41ccf..e6d389e333d7 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -105,7 +105,7 @@ static int const clks_init_on[] __initconst = {
IMX6SX_CLK_EPIT2,
};
-static struct clk_div_table clk_enet_ref_table[] = {
+static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
{ .val = 2, .div = 5, },
@@ -113,14 +113,14 @@ static struct clk_div_table clk_enet_ref_table[] = {
{ }
};
-static struct clk_div_table post_div_table[] = {
+static const struct clk_div_table post_div_table[] = {
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
-static struct clk_div_table video_div_table[] = {
+static const struct clk_div_table video_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 1, },
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index b4e0dff3c8c2..5e8c18afce9a 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -78,7 +78,7 @@ static int const clks_init_on[] __initconst = {
IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
};
-static struct clk_div_table clk_enet_ref_table[] = {
+static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
{ .val = 2, .div = 5, },
@@ -86,14 +86,14 @@ static struct clk_div_table clk_enet_ref_table[] = {
{ }
};
-static struct clk_div_table post_div_table[] = {
+static const struct clk_div_table post_div_table[] = {
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
-static struct clk_div_table video_div_table[] = {
+static const struct clk_div_table video_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 1, },
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 3da121826b1b..2305699db467 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -27,7 +27,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_nand;
-static struct clk_div_table test_div_table[] = {
+static const struct clk_div_table test_div_table[] = {
{ .val = 3, .div = 1, },
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
@@ -35,7 +35,7 @@ static struct clk_div_table test_div_table[] = {
{ }
};
-static struct clk_div_table post_div_table[] = {
+static const struct clk_div_table post_div_table[] = {
{ .val = 3, .div = 4, },
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 59b1863deb88..6dae54325a91 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -102,7 +102,7 @@ static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_
static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", };
-static struct clk_div_table pll4_audio_div_table[] = {
+static const struct clk_div_table pll4_audio_div_table[] = {
{ .val = 0, .div = 1 },
{ .val = 1, .div = 2 },
{ .val = 2, .div = 6 },
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index edd8e6918050..16e56772d280 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -27,7 +27,6 @@ static inline struct mtk_clk_cpumux *to_mtk_clk_cpumux(struct clk_hw *_hw)
static u8 clk_cpumux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_cpumux *mux = to_mtk_clk_cpumux(hw);
- int num_parents = clk_hw_get_num_parents(hw);
unsigned int val;
regmap_read(mux->regmap, mux->reg, &val);
@@ -35,9 +34,6 @@ static u8 clk_cpumux_get_parent(struct clk_hw *hw)
val >>= mux->shift;
val &= mux->mask;
- if (val >= num_parents)
- return -EINVAL;
-
return val;
}
@@ -98,7 +94,7 @@ int __init mtk_clk_register_cpumuxes(struct device_node *node,
regmap = syscon_node_to_regmap(node);
if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %s: %ld\n", node->full_name,
+ pr_err("Cannot find regmap for %pOF: %ld\n", node,
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 0541df78141c..9c0ae4278a94 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -114,7 +114,7 @@ int mtk_clk_register_gates(struct device_node *node,
regmap = syscon_node_to_regmap(node);
if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %s: %ld\n", node->full_name,
+ pr_err("Cannot find regmap for %pOF: %ld\n", node,
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 309049d41f1b..d3551d5efef2 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -72,7 +72,7 @@ void mtk_register_reset_controller(struct device_node *np,
regmap = syscon_node_to_regmap(np);
if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %s: %ld\n", np->full_name,
+ pr_err("Cannot find regmap for %pOF: %ld\n", np,
PTR_ERR(regmap));
return;
}
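
The %pOF format specifier used in the three hunks above prints the full
path of a struct device_node directly, replacing the open-coded
node->full_name dereferences. A minimal sketch of its use (hypothetical
function, not part of these patches):

	#include <linux/of.h>
	#include <linux/printk.h>

	/* %pOF expands to the node's full path, e.g. /soc/clocks@10000000 */
	static void report_node(const struct device_node *np)
	{
		pr_info("probing %pOF\n", np);
	}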
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 5588f75a8414..d2d0174a6eca 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -6,6 +6,7 @@ config COMMON_CLK_AMLOGIC
config COMMON_CLK_MESON8B
bool
depends on COMMON_CLK_AMLOGIC
+ select RESET_CONTROLLER
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 83b6d9d65aa1..b139d41b25da 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-regmap.o gxbb-aoclk-32k.o
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c
new file mode 100644
index 000000000000..491634dbc985
--- /dev/null
+++ b/drivers/clk/meson/gxbb-aoclk-32k.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include "gxbb-aoclk.h"
+
+/*
+ * The AO Domain embeds a dual divider to generate a more precise
+ * 32.768 kHz clock for low-power suspend mode and CEC.
+ * ______ ______
+ * | | | |
+ * ______ | Div1 |-| Cnt1 | ______
+ * | | /|______| |______|\ | |
+ * Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
+ * |______| | \| | | |/ | |______|
+ * | | Div2 |-| Cnt2 | |
+ * | |______| |______| |
+ * |_______________________|
+ *
+ * The dividing can be switched between single and dual mode, with a
+ * counter per divider setting how long each one is held before switching.
+ * The entire dividing mechanism can also be bypassed.
+ */
+
+#define CLK_CNTL0_N1_MASK GENMASK(11, 0)
+#define CLK_CNTL0_N2_MASK GENMASK(23, 12)
+#define CLK_CNTL0_DUALDIV_EN BIT(28)
+#define CLK_CNTL0_OUT_GATE_EN BIT(30)
+#define CLK_CNTL0_IN_GATE_EN BIT(31)
+
+#define CLK_CNTL1_M1_MASK GENMASK(11, 0)
+#define CLK_CNTL1_M2_MASK GENMASK(23, 12)
+#define CLK_CNTL1_BYPASS_EN BIT(24)
+#define CLK_CNTL1_SELECT_OSC BIT(27)
+
+#define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10)
+
+struct cec_32k_freq_table {
+ unsigned long parent_rate;
+ unsigned long target_rate;
+ bool dualdiv;
+ unsigned int n1;
+ unsigned int n2;
+ unsigned int m1;
+ unsigned int m2;
+};
+
+static const struct cec_32k_freq_table aoclk_cec_32k_table[] = {
+ [0] = {
+ .parent_rate = 24000000,
+ .target_rate = 32768,
+ .dualdiv = true,
+ .n1 = 733,
+ .n2 = 732,
+ .m1 = 8,
+ .m2 = 11,
+ },
+};
+
+/*
+ * If CLK_CNTL0_DUALDIV_EN == 0
+ * - only the N1 divider is used
+ * If CLK_CNTL0_DUALDIV_EN == 1
+ * - hold the N1 divider for M1 cycles, then switch to N2
+ * - hold the N2 divider for M2 cycles, then switch back to N1
+ * This alternation yields a more accurate average division ratio.
+ */
+static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
+ unsigned long n1;
+ u32 reg0, reg1;
+
+ regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, &reg0);
+ regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, &reg1);
+
+ if (reg1 & CLK_CNTL1_BYPASS_EN)
+ return parent_rate;
+
+ if (reg0 & CLK_CNTL0_DUALDIV_EN) {
+ unsigned long n2, m1, m2, f1, f2, p1, p2;
+
+ n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
+ n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1;
+
+ m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1;
+ m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1;
+
+ f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
+ f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
+
+ p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
+ p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
+
+ return DIV_ROUND_UP(100000000, p1 + p2);
+ }
+
+ n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1;
+
+ return DIV_ROUND_CLOSEST(parent_rate, n1);
+}
+
+static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate,
+ unsigned long prate)
+{
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(aoclk_cec_32k_table); i++)
+ if (aoclk_cec_32k_table[i].parent_rate == prate &&
+ aoclk_cec_32k_table[i].target_rate == rate)
+ return &aoclk_cec_32k_table[i];
+
+ return NULL;
+}
+
+static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
+ *prate);
+
+	/* If no match, return the first table entry */
+ if (!freq)
+ return aoclk_cec_32k_table[0].target_rate;
+
+ return freq->target_rate;
+}
+
+/*
+ * Per the Amlogic init procedure, the IN and OUT gates need to be handled
+ * carefully during rate setting to avoid any glitches.
+ */
+
+static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate,
+ parent_rate);
+ struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw);
+ u32 reg = 0;
+
+ if (!freq)
+ return -EINVAL;
+
+ /* Disable clock */
+ regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
+ CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0);
+
+ reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1);
+ if (freq->dualdiv)
+ reg |= CLK_CNTL0_DUALDIV_EN |
+ FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1);
+
+ regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg);
+
+ reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1);
+ if (freq->dualdiv)
+ reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1);
+
+ regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg);
+
+ /* Enable clock */
+ regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
+ CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN);
+
+ udelay(200);
+
+ regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0,
+ CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN);
+
+ regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1,
+ CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC);
+
+ /* Select 32k from XTAL */
+ regmap_update_bits(cec_32k->regmap,
+ AO_RTI_PWR_CNTL_REG0,
+ PWR_CNTL_ALT_32K_SEL,
+ FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4));
+
+ return 0;
+}
+
+const struct clk_ops meson_aoclk_cec_32k_ops = {
+ .recalc_rate = aoclk_cec_32k_recalc_rate,
+ .round_rate = aoclk_cec_32k_round_rate,
+ .set_rate = aoclk_cec_32k_set_rate,
+};
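
The recalc_rate path above averages the two divider phases using
fixed-point math scaled by 1e8. A standalone userspace sketch of the same
arithmetic, plugging in the values from aoclk_cec_32k_table[0] (not driver
code; output lands close to the 32768 Hz target):

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
	#define DIV_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))

	int main(void)
	{
		/* Values from aoclk_cec_32k_table[0] above. */
		unsigned long parent = 24000000, n1 = 733, n2 = 732, m1 = 8, m2 = 11;
		unsigned long f1 = DIV_ROUND_CLOSEST(parent, n1);
		unsigned long f2 = DIV_ROUND_CLOSEST(parent, n2);
		/* Periods scaled by 1e8 to keep the averaging in integer math. */
		unsigned long p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
		unsigned long p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));

		printf("%lu Hz\n", DIV_ROUND_UP(100000000, p1 + p2)); /* ~32768 Hz */
		return 0;
	}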
diff --git a/drivers/clk/meson/gxbb-aoclk-regmap.c b/drivers/clk/meson/gxbb-aoclk-regmap.c
new file mode 100644
index 000000000000..2515fbfa0467
--- /dev/null
+++ b/drivers/clk/meson/gxbb-aoclk-regmap.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include "gxbb-aoclk.h"
+
+static int aoclk_gate_regmap_enable(struct clk_hw *hw)
+{
+ struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
+
+ return regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
+ BIT(gate->bit_idx), BIT(gate->bit_idx));
+}
+
+static void aoclk_gate_regmap_disable(struct clk_hw *hw)
+{
+ struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
+
+ regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
+ BIT(gate->bit_idx), 0);
+}
+
+static int aoclk_gate_regmap_is_enabled(struct clk_hw *hw)
+{
+ struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(gate->regmap, AO_RTI_GEN_CNTL_REG0, &val);
+ if (ret)
+ return ret;
+
+ return (val & BIT(gate->bit_idx)) != 0;
+}
+
+const struct clk_ops meson_aoclk_gate_regmap_ops = {
+ .enable = aoclk_gate_regmap_enable,
+ .disable = aoclk_gate_regmap_disable,
+ .is_enabled = aoclk_gate_regmap_is_enabled,
+};
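
regmap_update_bits(), which these gate ops rely on, performs a locked
read-modify-write: only the bits in the mask are touched, taking their new
values from the corresponding bits of val. Roughly equivalent open-coded
logic (a sketch only; the real helper also skips the write when the value
is unchanged):

	#include <linux/regmap.h>

	/* Sketch of the read-modify-write that regmap_update_bits() performs. */
	static int update_bits_sketch(struct regmap *map, unsigned int reg,
				      unsigned int mask, unsigned int val)
	{
		unsigned int orig;
		int ret;

		ret = regmap_read(map, reg, &orig);
		if (ret)
			return ret;

		return regmap_write(map, reg, (orig & ~mask) | (val & mask));
	}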
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index b45c5fba7e35..6c161e0a8e59 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -56,16 +56,20 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <dt-bindings/clock/gxbb-aoclkc.h>
#include <dt-bindings/reset/gxbb-aoclkc.h>
+#include "gxbb-aoclk.h"
static DEFINE_SPINLOCK(gxbb_aoclk_lock);
struct gxbb_aoclk_reset_controller {
struct reset_controller_dev reset;
unsigned int *data;
- void __iomem *base;
+ struct regmap *regmap;
};
static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev,
@@ -74,9 +78,8 @@ static int gxbb_aoclk_do_reset(struct reset_controller_dev *rcdev,
struct gxbb_aoclk_reset_controller *reset =
container_of(rcdev, struct gxbb_aoclk_reset_controller, reset);
- writel(BIT(reset->data[id]), reset->base);
-
- return 0;
+ return regmap_write(reset->regmap, AO_RTI_GEN_CNTL_REG0,
+ BIT(reset->data[id]));
}
static const struct reset_control_ops gxbb_aoclk_reset_ops = {
@@ -84,13 +87,12 @@ static const struct reset_control_ops gxbb_aoclk_reset_ops = {
};
#define GXBB_AO_GATE(_name, _bit) \
-static struct clk_gate _name##_ao = { \
- .reg = (void __iomem *)0, \
+static struct aoclk_gate_regmap _name##_ao = { \
.bit_idx = (_bit), \
.lock = &gxbb_aoclk_lock, \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
- .ops = &clk_gate_ops, \
+ .ops = &meson_aoclk_gate_regmap_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
@@ -104,6 +106,17 @@ GXBB_AO_GATE(uart1, 3);
GXBB_AO_GATE(uart2, 5);
GXBB_AO_GATE(ir_blaster, 6);
+static struct aoclk_cec_32k cec_32k_ao = {
+ .lock = &gxbb_aoclk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "cec_32k_ao",
+ .ops = &meson_aoclk_cec_32k_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+};
+
static unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
@@ -113,7 +126,7 @@ static unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_gate *gxbb_aoclk_gate[] = {
+static struct aoclk_gate_regmap *gxbb_aoclk_gate[] = {
[CLKID_AO_REMOTE] = &remote_ao,
[CLKID_AO_I2C_MASTER] = &i2c_master_ao,
[CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
@@ -130,30 +143,30 @@ static struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
[CLKID_AO_UART1] = &uart1_ao.hw,
[CLKID_AO_UART2] = &uart2_ao.hw,
[CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
+ [CLKID_AO_CEC_32K] = &cec_32k_ao.hw,
},
- .num = ARRAY_SIZE(gxbb_aoclk_gate),
+ .num = 7,
};
static int gxbb_aoclkc_probe(struct platform_device *pdev)
{
- struct resource *res;
- void __iomem *base;
- int ret, clkid;
- struct device *dev = &pdev->dev;
struct gxbb_aoclk_reset_controller *rstc;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret, clkid;
rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return -ENOMEM;
- /* Generic clocks */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap\n");
+ return -ENODEV;
+ }
/* Reset Controller */
- rstc->base = base;
+ rstc->regmap = regmap;
rstc->data = gxbb_aoclk_reset;
rstc->reset.ops = &gxbb_aoclk_reset_ops;
rstc->reset.nr_resets = ARRAY_SIZE(gxbb_aoclk_reset);
@@ -161,10 +174,10 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev)
ret = devm_reset_controller_register(dev, &rstc->reset);
/*
- * Populate base address and register all clks
+ * Populate regmap and register all clks
*/
- for (clkid = 0; clkid < gxbb_aoclk_onecell_data.num; clkid++) {
- gxbb_aoclk_gate[clkid]->reg = base;
+ for (clkid = 0; clkid < ARRAY_SIZE(gxbb_aoclk_gate); clkid++) {
+ gxbb_aoclk_gate[clkid]->regmap = regmap;
ret = devm_clk_hw_register(dev,
gxbb_aoclk_onecell_data.hws[clkid]);
@@ -172,12 +185,18 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev)
return ret;
}
+ /* Specific clocks */
+ cec_32k_ao.regmap = regmap;
+ ret = devm_clk_hw_register(dev, &cec_32k_ao.hw);
+ if (ret)
+ return ret;
+
return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
&gxbb_aoclk_onecell_data);
}
static const struct of_device_id gxbb_aoclkc_match_table[] = {
- { .compatible = "amlogic,gxbb-aoclkc" },
+ { .compatible = "amlogic,meson-gx-aoclkc" },
{ }
};
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
new file mode 100644
index 000000000000..e8604c8f7eee
--- /dev/null
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __GXBB_AOCLKC_H
+#define __GXBB_AOCLKC_H
+
+/* AO Configuration Clock registers offsets */
+#define AO_RTI_PWR_CNTL_REG1 0x0c
+#define AO_RTI_PWR_CNTL_REG0 0x10
+#define AO_RTI_GEN_CNTL_REG0 0x40
+#define AO_OSCIN_CNTL 0x58
+#define AO_CRT_CLK_CNTL1 0x68
+#define AO_RTC_ALT_CLK_CNTL0 0x94
+#define AO_RTC_ALT_CLK_CNTL1 0x98
+
+struct aoclk_gate_regmap {
+ struct clk_hw hw;
+ unsigned bit_idx;
+ struct regmap *regmap;
+ spinlock_t *lock;
+};
+
+#define to_aoclk_gate_regmap(_hw) \
+ container_of(_hw, struct aoclk_gate_regmap, hw)
+
+extern const struct clk_ops meson_aoclk_gate_regmap_ops;
+
+struct aoclk_cec_32k {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+};
+
+#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
+
+extern const struct clk_ops meson_aoclk_cec_32k_ops;
+
+#endif /* __GXBB_AOCLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index a7ea5f3da89d..b2d1e8ed7152 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -850,13 +850,14 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
.shift = 0,
.width = 8,
},
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &meson_clk_audio_divider_ops,
.parent_names = (const char *[]){ "cts_amclk_sel" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -880,7 +881,7 @@ static struct clk_mux gxbb_cts_mclk_i958_sel = {
/* Default parent unknown (register reset value: 0) */
.table = (u32[]){ 1, 2, 3 },
.lock = &clk_lock,
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
@@ -894,12 +895,13 @@ static struct clk_divider gxbb_cts_mclk_i958_div = {
.shift = 16,
.width = 8,
.lock = &clk_lock,
- .hw.init = &(struct clk_init_data){
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ .hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
.ops = &clk_divider_ops,
.parent_names = (const char *[]){ "cts_mclk_i958_sel" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -979,6 +981,156 @@ static struct clk_mux gxbb_32k_clk_sel = {
},
};
+static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
+ "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
+
+ /*
+ * Following these parent clocks, we should also have had mpll2, mpll3
+ * and gp0_pll but these clocks are too precious to be used here. All
+	 * the necessary rates for MMC and NAND operation can be achieved using
+	 * xtal or fclk_div clocks.
+ */
+};
+
+/* SDIO clock */
+static struct clk_mux gxbb_sd_emmc_a_clk0_sel = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_sel",
+ .ops = &clk_mux_ops,
+ .parent_names = gxbb_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_divider gxbb_sd_emmc_a_clk0_div = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_a_clk0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_sd_emmc_a_clk0 = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_a_clk0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
+ .num_parents = 1,
+
+ /*
+ * FIXME:
+		 * We need CLK_IGNORE_UNUSED because the mmc DT node points to
+		 * xtal instead of this clock. CCF would gate this on boot,
+		 * killing the mmc controller. Please remove this flag once the
+		 * DT properly points to this clock instead of xtal.
+		 *
+		 * Same goes for the eMMC B and C clocks.
+ */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* SDcard clock */
+static struct clk_mux gxbb_sd_emmc_b_clk0_sel = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_sel",
+ .ops = &clk_mux_ops,
+ .parent_names = gxbb_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_divider gxbb_sd_emmc_b_clk0_div = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .lock = &clk_lock,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_b_clk0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_sd_emmc_b_clk0 = {
+ .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_b_clk0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* EMMC/NAND clock */
+static struct clk_mux gxbb_sd_emmc_c_clk0_sel = {
+ .reg = (void *)HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_sel",
+ .ops = &clk_mux_ops,
+ .parent_names = gxbb_sd_emmc_clk0_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_divider gxbb_sd_emmc_c_clk0_div = {
+ .reg = (void *)HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ .hw.init = &(struct clk_init_data) {
+ .name = "sd_emmc_c_clk0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_sd_emmc_c_clk0 = {
+ .reg = (void *)HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "sd_emmc_c_clk0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1188,6 +1340,16 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_32K_CLK] = &gxbb_32k_clk.hw,
[CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw,
[CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw,
+ [CLKID_SD_EMMC_A_CLK0_SEL] = &gxbb_sd_emmc_a_clk0_sel.hw,
+ [CLKID_SD_EMMC_A_CLK0_DIV] = &gxbb_sd_emmc_a_clk0_div.hw,
+ [CLKID_SD_EMMC_A_CLK0] = &gxbb_sd_emmc_a_clk0.hw,
+ [CLKID_SD_EMMC_B_CLK0_SEL] = &gxbb_sd_emmc_b_clk0_sel.hw,
+ [CLKID_SD_EMMC_B_CLK0_DIV] = &gxbb_sd_emmc_b_clk0_div.hw,
+ [CLKID_SD_EMMC_B_CLK0] = &gxbb_sd_emmc_b_clk0.hw,
+ [CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
+ [CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
+ [CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
@@ -1310,6 +1472,16 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_32K_CLK] = &gxbb_32k_clk.hw,
[CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw,
[CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw,
+ [CLKID_SD_EMMC_A_CLK0_SEL] = &gxbb_sd_emmc_a_clk0_sel.hw,
+ [CLKID_SD_EMMC_A_CLK0_DIV] = &gxbb_sd_emmc_a_clk0_div.hw,
+ [CLKID_SD_EMMC_A_CLK0] = &gxbb_sd_emmc_a_clk0.hw,
+ [CLKID_SD_EMMC_B_CLK0_SEL] = &gxbb_sd_emmc_b_clk0_sel.hw,
+ [CLKID_SD_EMMC_B_CLK0_DIV] = &gxbb_sd_emmc_b_clk0_div.hw,
+ [CLKID_SD_EMMC_B_CLK0] = &gxbb_sd_emmc_b_clk0.hw,
+ [CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
+ [CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
+ [CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
@@ -1425,6 +1597,9 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_cts_amclk,
&gxbb_cts_mclk_i958,
&gxbb_32k_clk,
+ &gxbb_sd_emmc_a_clk0,
+ &gxbb_sd_emmc_b_clk0,
+ &gxbb_sd_emmc_c_clk0,
};
static struct clk_mux *const gxbb_clk_muxes[] = {
@@ -1437,6 +1612,9 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_cts_mclk_i958_sel,
&gxbb_cts_i958,
&gxbb_32k_clk_sel,
+ &gxbb_sd_emmc_a_clk0_sel,
+ &gxbb_sd_emmc_b_clk0_sel,
+ &gxbb_sd_emmc_c_clk0_sel,
};
static struct clk_divider *const gxbb_clk_dividers[] = {
@@ -1446,6 +1624,9 @@ static struct clk_divider *const gxbb_clk_dividers[] = {
&gxbb_mali_1_div,
&gxbb_cts_mclk_i958_div,
&gxbb_32k_clk_div,
+ &gxbb_sd_emmc_a_clk0_div,
+ &gxbb_sd_emmc_b_clk0_div,
+ &gxbb_sd_emmc_c_clk0_div,
};
static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
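
Each sd_emmc branch added above forms a mux -> divider -> gate chain, with
CLK_SET_RATE_PARENT letting a rate request on the gate propagate up through
the divider to the mux. A hedged consumer-side sketch (the device and the
"clkin" clock-names entry are hypothetical; error handling trimmed):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int sd_emmc_clk_setup(struct device *dev)
	{
		/* "clkin" is a hypothetical clock-names entry for sd_emmc_b_clk0. */
		struct clk *clk = devm_clk_get(dev, "clkin");
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* Propagates through the divider and mux via CLK_SET_RATE_PARENT. */
		ret = clk_set_rate(clk, 50000000);
		if (ret)
			return ret;

		return clk_prepare_enable(clk);
	}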
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index d63e77e8433d..5b1d4b374d1c 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -167,130 +167,33 @@
* CLKID index values
*
* These indices are entirely contrived and do not map onto the hardware.
- * Migrate them out of this header and into the DT header file when they need
- * to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/gxbb-clkc.h. Only the clock IDs we don't want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
*/
-#define CLKID_SYS_PLL 0
/* ID 1 is unused (it was used by the non-existing CLKID_CPUCLK before) */
-/* CLKID_HDMI_PLL */
-#define CLKID_FIXED_PLL 3
-/* CLKID_FCLK_DIV2 */
-/* CLKID_FCLK_DIV3 */
-/* CLKID_FCLK_DIV4 */
-#define CLKID_FCLK_DIV5 7
-#define CLKID_FCLK_DIV7 8
-/* CLKID_GP0_PLL */
#define CLKID_MPEG_SEL 10
#define CLKID_MPEG_DIV 11
-/* CLKID_CLK81 */
-#define CLKID_MPLL0 13
-#define CLKID_MPLL1 14
-/* CLKID_MPLL2 */
-#define CLKID_DDR 16
-#define CLKID_DOS 17
-#define CLKID_ISA 18
-#define CLKID_PL301 19
-#define CLKID_PERIPHS 20
-/* CLKID_SPICC */
-/* CLKID_I2C */
-/* #define CLKID_SAR_ADC */
-#define CLKID_SMART_CARD 24
-/* CLKID_RNG0 */
-/* CLKID_UART0 */
-#define CLKID_SDHC 27
-#define CLKID_STREAM 28
-#define CLKID_ASYNC_FIFO 29
-#define CLKID_SDIO 30
-#define CLKID_ABUF 31
-#define CLKID_HIU_IFACE 32
-#define CLKID_ASSIST_MISC 33
-/* CLKID_SPI */
-#define CLKID_I2S_SPDIF 35
-/* CLKID_ETH */
-#define CLKID_DEMUX 37
-/* CLKID_AIU_GLUE */
-/* CLKID_IEC958 */
-/* CLKID_I2S_OUT */
-#define CLKID_AMCLK 41
-#define CLKID_AIFIFO2 42
-#define CLKID_MIXER 43
-/* CLKID_MIXER_IFACE */
-#define CLKID_ADC 45
-#define CLKID_BLKMV 46
-/* CLKID_AIU */
-/* CLKID_UART1 */
-#define CLKID_G2D 49
-/* CLKID_USB0 */
-/* CLKID_USB1 */
-#define CLKID_RESET 52
-#define CLKID_NAND 53
-#define CLKID_DOS_PARSER 54
-/* CLKID_USB */
-#define CLKID_VDIN1 56
-#define CLKID_AHB_ARB0 57
-#define CLKID_EFUSE 58
-#define CLKID_BOOT_ROM 59
-#define CLKID_AHB_DATA_BUS 60
-#define CLKID_AHB_CTRL_BUS 61
-#define CLKID_HDMI_INTR_SYNC 62
-/* CLKID_HDMI_PCLK */
-/* CLKID_USB1_DDR_BRIDGE */
-/* CLKID_USB0_DDR_BRIDGE */
-#define CLKID_MMC_PCLK 66
-#define CLKID_DVIN 67
-/* CLKID_UART2 */
-/* #define CLKID_SANA */
-#define CLKID_VPU_INTR 70
-#define CLKID_SEC_AHB_AHB3_BRIDGE 71
-#define CLKID_CLK81_A53 72
-#define CLKID_VCLK2_VENCI0 73
-#define CLKID_VCLK2_VENCI1 74
-#define CLKID_VCLK2_VENCP0 75
-#define CLKID_VCLK2_VENCP1 76
-/* CLKID_GCLK_VENCI_INT0 */
-#define CLKID_GCLK_VENCI_INT 78
-#define CLKID_DAC_CLK 79
-/* CLKID_AOCLK_GATE */
-/* CLKID_IEC958_GATE */
-#define CLKID_ENC480P 82
-#define CLKID_RNG1 83
-#define CLKID_GCLK_VENCI_INT1 84
-#define CLKID_VCLK2_VENCLMCC 85
-#define CLKID_VCLK2_VENCL 86
-#define CLKID_VCLK_OTHER 87
-#define CLKID_EDP 88
-#define CLKID_AO_MEDIA_CPU 89
-#define CLKID_AO_AHB_SRAM 90
-#define CLKID_AO_AHB_BUS 91
-#define CLKID_AO_IFACE 92
-/* CLKID_AO_I2C */
-/* CLKID_SD_EMMC_A */
-/* CLKID_SD_EMMC_B */
-/* CLKID_SD_EMMC_C */
-/* CLKID_SAR_ADC_CLK */
-/* CLKID_SAR_ADC_SEL */
#define CLKID_SAR_ADC_DIV 99
-/* CLKID_MALI_0_SEL */
-#define CLKID_MALI_0_DIV 101
-/* CLKID_MALI_0 */
-/* CLKID_MALI_1_SEL */
-#define CLKID_MALI_1_DIV 104
-/* CLKID_MALI_1 */
-/* CLKID_MALI */
-/* CLKID_CTS_AMCLK */
+#define CLKID_MALI_0_DIV 101
+#define CLKID_MALI_1_DIV 104
#define CLKID_CTS_AMCLK_SEL 108
#define CLKID_CTS_AMCLK_DIV 109
-/* CLKID_CTS_MCLK_I958 */
#define CLKID_CTS_MCLK_I958_SEL 111
#define CLKID_CTS_MCLK_I958_DIV 112
-/* CLKID_CTS_I958 */
-#define CLKID_32K_CLK 114
#define CLKID_32K_CLK_SEL 115
#define CLKID_32K_CLK_DIV 116
+#define CLKID_SD_EMMC_A_CLK0_SEL 117
+#define CLKID_SD_EMMC_A_CLK0_DIV 118
+#define CLKID_SD_EMMC_B_CLK0_SEL 120
+#define CLKID_SD_EMMC_B_CLK0_DIV 121
+#define CLKID_SD_EMMC_C_CLK0_SEL 123
+#define CLKID_SD_EMMC_C_CLK0_DIV 124
-#define NR_CLKS 117
+#define NR_CLKS 126
-/* include the CLKIDs that have been made part of the stable DT binding */
+/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
#endif /* __GXBB_H */
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 6ec512ad2598..20ab7190d328 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -25,6 +25,8 @@
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include "clkc.h"
@@ -32,6 +34,13 @@
static DEFINE_SPINLOCK(clk_lock);
+static void __iomem *clk_base;
+
+struct meson8b_clk_reset {
+ struct reset_controller_dev reset;
+ void __iomem *base;
+};
+
static const struct pll_rate_table sys_pll_rate_table[] = {
PLL_RATE(312000000, 52, 1, 2),
PLL_RATE(336000000, 56, 1, 2),
@@ -590,6 +599,7 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MPLL0] = &meson8b_mpll0.hw,
[CLKID_MPLL1] = &meson8b_mpll1.hw,
[CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
};
@@ -695,20 +705,114 @@ static struct clk_divider *const meson8b_clk_dividers[] = {
&meson8b_mpeg_clk_div,
};
+static const struct meson8b_clk_reset_line {
+ u32 reg;
+ u8 bit_idx;
+} meson8b_clk_reset_bits[] = {
+ [CLKC_RESET_L2_CACHE_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30
+ },
+ [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29
+ },
+ [CLKC_RESET_SCU_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28
+ },
+ [CLKC_RESET_CPU3_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27
+ },
+ [CLKC_RESET_CPU2_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26
+ },
+ [CLKC_RESET_CPU1_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25
+ },
+ [CLKC_RESET_CPU0_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24
+ },
+ [CLKC_RESET_A5_GLOBAL_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18
+ },
+ [CLKC_RESET_A5_AXI_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17
+ },
+ [CLKC_RESET_A5_ABP_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16
+ },
+ [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = {
+ .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30
+ },
+ [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = {
+ .reg = HHI_VID_CLK_CNTL, .bit_idx = 15
+ },
+ [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = {
+ .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7
+ },
+ [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = {
+ .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3
+ },
+ [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = {
+ .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1
+ },
+ [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = {
+ .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0
+ },
+};
+
+static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct meson8b_clk_reset *meson8b_clk_reset =
+ container_of(rcdev, struct meson8b_clk_reset, reset);
+ unsigned long flags;
+ const struct meson8b_clk_reset_line *reset;
+ u32 val;
+
+ if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
+ return -EINVAL;
+
+ reset = &meson8b_clk_reset_bits[id];
+
+ spin_lock_irqsave(&clk_lock, flags);
+
+ val = readl(meson8b_clk_reset->base + reset->reg);
+ if (assert)
+ val |= BIT(reset->bit_idx);
+ else
+ val &= ~BIT(reset->bit_idx);
+ writel(val, meson8b_clk_reset->base + reset->reg);
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+
+ return 0;
+}
+
+static int meson8b_clk_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson8b_clk_reset_update(rcdev, id, true);
+}
+
+static int meson8b_clk_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson8b_clk_reset_update(rcdev, id, false);
+}
+
+static const struct reset_control_ops meson8b_clk_reset_ops = {
+ .assert = meson8b_clk_reset_assert,
+ .deassert = meson8b_clk_reset_deassert,
+};
+
static int meson8b_clkc_probe(struct platform_device *pdev)
{
- void __iomem *clk_base;
int ret, clkid, i;
struct clk_hw *parent_hw;
struct clk *parent_clk;
struct device *dev = &pdev->dev;
- /* Generic clocks and PLLs */
- clk_base = of_iomap(dev->of_node, 1);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
+ if (!clk_base)
return -ENXIO;
- }
/* Populate base address for PLLs */
for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++)
@@ -748,7 +852,7 @@ static int meson8b_clkc_probe(struct platform_device *pdev)
/* FIXME convert to devm_clk_register */
ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]);
if (ret)
- goto iounmap;
+ return ret;
}
/*
@@ -771,15 +875,11 @@ static int meson8b_clkc_probe(struct platform_device *pdev)
if (ret) {
pr_err("%s: failed to register clock notifier for cpu_clk\n",
__func__);
- goto iounmap;
+ return ret;
}
return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
&meson8b_hw_onecell_data);
-
-iounmap:
- iounmap(clk_base);
- return ret;
}
static const struct of_device_id meson8b_clkc_match_table[] = {
@@ -798,3 +898,39 @@ static struct platform_driver meson8b_driver = {
};
builtin_platform_driver(meson8b_driver);
+
+static void __init meson8b_clkc_reset_init(struct device_node *np)
+{
+ struct meson8b_clk_reset *rstc;
+ int ret;
+
+ /* Generic clocks, PLLs and some of the reset-bits */
+ clk_base = of_iomap(np, 1);
+ if (!clk_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return;
+ }
+
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return;
+
+ /* Reset Controller */
+ rstc->base = clk_base;
+ rstc->reset.ops = &meson8b_clk_reset_ops;
+ rstc->reset.nr_resets = ARRAY_SIZE(meson8b_clk_reset_bits);
+ rstc->reset.of_node = np;
+ ret = reset_controller_register(&rstc->reset);
+ if (ret) {
+ pr_err("%s: Failed to register clkc reset controller: %d\n",
+ __func__, ret);
+ return;
+ }
+}
+
+CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
+ meson8b_clkc_reset_init);
+CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
+ meson8b_clkc_reset_init);
+CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
+ meson8b_clkc_reset_init);
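
A consumer reaches the new meson8b clkc reset lines through the standard
reset API, given a resets phandle to the clock controller in DT. A minimal
sketch (the "cpu1" reset-names entry is hypothetical):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int cpu_soft_reset_cycle(struct device *dev)
	{
		/* "cpu1" is a hypothetical reset-names entry for CPU1_SOFT_RESET. */
		struct reset_control *rst = devm_reset_control_get(dev, "cpu1");
		int ret;

		if (IS_ERR(rst))
			return PTR_ERR(rst);

		ret = reset_control_assert(rst);
		if (ret)
			return ret;

		return reset_control_deassert(rst);
	}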
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index a687e02547dc..2eaf8a52e7dd 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -37,6 +37,9 @@
#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
+#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
+#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
@@ -60,110 +63,19 @@
* CLKID index values
*
* These indices are entirely contrived and do not map onto the hardware.
- * Migrate them out of this header and into the DT header file when they need
- * to be exposed to client nodes in DT: include/dt-bindings/clock/meson8b-clkc.h
+ * It has now been decided to expose everything by default in the DT header:
+ * include/dt-bindings/clock/meson8b-clkc.h. Only the clock IDs we don't
+ * want
+ * to expose, such as the internal muxes and dividers of composite clocks,
+ * will remain defined here.
*/
-/* CLKID_UNUSED */
-/* CLKID_XTAL */
-/* CLKID_PLL_FIXED */
-/* CLKID_PLL_VID */
-/* CLKID_PLL_SYS */
-/* CLKID_FCLK_DIV2 */
-/* CLKID_FCLK_DIV3 */
-/* CLKID_FCLK_DIV4 */
-/* CLKID_FCLK_DIV5 */
-/* CLKID_FCLK_DIV7 */
-/* CLKID_CLK81 */
-/* CLKID_MALI */
-/* CLKID_CPUCLK */
-/* CLKID_ZERO */
-/* CLKID_MPEG_SEL */
-/* CLKID_MPEG_DIV */
-#define CLKID_DDR 16
-#define CLKID_DOS 17
-#define CLKID_ISA 18
-#define CLKID_PL301 19
-#define CLKID_PERIPHS 20
-#define CLKID_SPICC 21
-#define CLKID_I2C 22
-/* #define CLKID_SAR_ADC */
-#define CLKID_SMART_CARD 24
-/* #define CLKID_RNG0 */
-#define CLKID_UART0 26
-#define CLKID_SDHC 27
-#define CLKID_STREAM 28
-#define CLKID_ASYNC_FIFO 29
-/* #define CLKID_SDIO */
-#define CLKID_ABUF 31
-#define CLKID_HIU_IFACE 32
-#define CLKID_ASSIST_MISC 33
-#define CLKID_SPI 34
-#define CLKID_I2S_SPDIF 35
-/* #define CLKID_ETH */
-#define CLKID_DEMUX 37
-#define CLKID_AIU_GLUE 38
-#define CLKID_IEC958 39
-#define CLKID_I2S_OUT 40
-#define CLKID_AMCLK 41
-#define CLKID_AIFIFO2 42
-#define CLKID_MIXER 43
-#define CLKID_MIXER_IFACE 44
-#define CLKID_ADC 45
-#define CLKID_BLKMV 46
-#define CLKID_AIU 47
-#define CLKID_UART1 48
-#define CLKID_G2D 49
-/* #define CLKID_USB0 */
-/* #define CLKID_USB1 */
-#define CLKID_RESET 52
-#define CLKID_NAND 53
-#define CLKID_DOS_PARSER 54
-/* #define CLKID_USB */
-#define CLKID_VDIN1 56
-#define CLKID_AHB_ARB0 57
-#define CLKID_EFUSE 58
-#define CLKID_BOOT_ROM 59
-#define CLKID_AHB_DATA_BUS 60
-#define CLKID_AHB_CTRL_BUS 61
-#define CLKID_HDMI_INTR_SYNC 62
-#define CLKID_HDMI_PCLK 63
-/* CLKID_USB1_DDR_BRIDGE */
-/* CLKID_USB0_DDR_BRIDGE */
-#define CLKID_MMC_PCLK 66
-#define CLKID_DVIN 67
-#define CLKID_UART2 68
-/* #define CLKID_SANA */
-#define CLKID_VPU_INTR 70
-#define CLKID_SEC_AHB_AHB3_BRIDGE 71
-#define CLKID_CLK81_A9 72
-#define CLKID_VCLK2_VENCI0 73
-#define CLKID_VCLK2_VENCI1 74
-#define CLKID_VCLK2_VENCP0 75
-#define CLKID_VCLK2_VENCP1 76
-#define CLKID_GCLK_VENCI_INT 77
-#define CLKID_GCLK_VENCP_INT 78
-#define CLKID_DAC_CLK 79
-#define CLKID_AOCLK_GATE 80
-#define CLKID_IEC958_GATE 81
-#define CLKID_ENC480P 82
-#define CLKID_RNG1 83
-#define CLKID_GCLK_VENCL_INT 84
-#define CLKID_VCLK2_VENCLMCC 85
-#define CLKID_VCLK2_VENCL 86
-#define CLKID_VCLK2_OTHER 87
-#define CLKID_EDP 88
-#define CLKID_AO_MEDIA_CPU 89
-#define CLKID_AO_AHB_SRAM 90
-#define CLKID_AO_AHB_BUS 91
-#define CLKID_AO_IFACE 92
-#define CLKID_MPLL0 93
-#define CLKID_MPLL1 94
-#define CLKID_MPLL2 95
-
#define CLK_NR_CLKS 96
-/* include the CLKIDs that have been made part of the stable DT binding */
+/*
+ * include the CLKIDs and RESETIDs that have
+ * been made part of the stable DT binding
+ */
#include <dt-bindings/clock/meson8b-clkc.h>
+#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
#endif /* __MESON8B_H */
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index 61893fe73251..089927e4cda2 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -9,7 +9,7 @@
void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
int nr_clks)
{
- static struct clk **clk_table;
+ struct clk **clk_table;
clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
if (!clk_table)
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 5b98ff9076f3..7b359afd620e 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -885,7 +885,7 @@ static const struct clk_ops clk_usb_i2c_ops = {
.recalc_rate = clk_usb_i2c_recalc_rate,
};
-static int clk_gate_enable(struct clk_hw *hw)
+static int lpc32xx_clk_gate_enable(struct clk_hw *hw)
{
struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw);
u32 mask = BIT(clk->bit_idx);
@@ -894,7 +894,7 @@ static int clk_gate_enable(struct clk_hw *hw)
return regmap_update_bits(clk_regmap, clk->reg, mask, val);
}
-static void clk_gate_disable(struct clk_hw *hw)
+static void lpc32xx_clk_gate_disable(struct clk_hw *hw)
{
struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw);
u32 mask = BIT(clk->bit_idx);
@@ -903,7 +903,7 @@ static void clk_gate_disable(struct clk_hw *hw)
regmap_update_bits(clk_regmap, clk->reg, mask, val);
}
-static int clk_gate_is_enabled(struct clk_hw *hw)
+static int lpc32xx_clk_gate_is_enabled(struct clk_hw *hw)
{
struct lpc32xx_clk_gate *clk = to_lpc32xx_gate(hw);
u32 val;
@@ -916,9 +916,9 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
}
static const struct clk_ops lpc32xx_clk_gate_ops = {
- .enable = clk_gate_enable,
- .disable = clk_gate_disable,
- .is_enabled = clk_gate_is_enabled,
+ .enable = lpc32xx_clk_gate_enable,
+ .disable = lpc32xx_clk_gate_disable,
+ .is_enabled = lpc32xx_clk_gate_is_enabled,
};
#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index d990fe44aef3..cc03d5508627 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -412,8 +412,6 @@ static const struct clk_ops clk_smd_rpm_ops = {
static const struct clk_ops clk_smd_rpm_branch_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
- .round_rate = clk_smd_rpm_round_rate,
- .recalc_rate = clk_smd_rpm_recalc_rate,
};
/* msm8916 */
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 2cfe7000fc60..3410ee68d4bc 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1176,7 +1176,7 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
.parent_names = gcc_xo_gpll0_bimc,
.num_parents = 3,
.flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 8abc200d4fd3..7ddec886fcd3 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2730,6 +2730,32 @@ static struct clk_fixed_factor ufs_rx_cfg_clk_src = {
},
};
+static struct clk_branch gcc_hlos1_vote_lpass_core_smmu_clk = {
+ .halt_reg = 0x7d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_rx_cfg_clk = {
.halt_reg = 0x75014,
.clkr = {
@@ -3307,6 +3333,8 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
[GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
[GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+ [GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK] = &gcc_hlos1_vote_lpass_core_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &gcc_hlos1_vote_lpass_adsp_smmu_clk.clkr,
[GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
[GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
[GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 78d1df9112ba..acbb38151ba1 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -15,6 +15,7 @@ config CLK_RENESAS
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -34,94 +35,103 @@ config CLK_EMEV2
bool "Emma Mobile EV2 clock support" if COMPILE_TEST
config CLK_RZA1
- bool
+ bool "RZ/A1H clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
config CLK_R8A73A4
- bool
+ bool "R-Mobile APE6 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
config CLK_R8A7740
- bool
+ bool "R-Mobile A1 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
config CLK_R8A7743
- bool
+ bool "RZ/G1M clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
config CLK_R8A7745
- bool
+ bool "RZ/G1E clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
config CLK_R8A7778
- bool
+ bool "R-Car M1A clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
config CLK_R8A7779
- bool
+ bool "R-Car H1 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
config CLK_R8A7790
- bool
+ bool "R-Car H2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7791
- bool
+ bool "R-Car M2-W/N clock support" if COMPILE_TEST
select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7792
- bool
+ bool "R-Car V2H clock support" if COMPILE_TEST
select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
config CLK_R8A7794
- bool
+ bool "R-Car E2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7795
- bool
+ bool "R-Car H3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
config CLK_R8A7796
- bool
+ bool "R-Car M3-W clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
+config CLK_R8A77995
+ bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
config CLK_SH73A0
- bool
+ bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
# Family
config CLK_RCAR_GEN2
- bool
+ bool "R-Car Gen2 legacy clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
config CLK_RCAR_GEN2_CPG
- bool
+ bool "R-Car Gen2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_GEN3_CPG
- bool
+ bool "R-Car Gen3 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
+config CLK_RCAR_USB2_CLOCK_SEL
+ bool "Renesas R-Car USB2 clock selector support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+	  This is a driver for the R-Car USB2 clock selector.
# Generic
config CLK_RENESAS_CPG_MSSR
- bool
+ bool "CPG/MSSR clock support" if COMPILE_TEST
select CLK_RENESAS_DIV6
config CLK_RENESAS_CPG_MSTP
- bool
+ bool "MSTP clock support" if COMPILE_TEST
config CLK_RENESAS_DIV6
bool "DIV6 clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 02d04124371f..9bda3ec5b199 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -13,12 +13,14 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
+obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
# Generic
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 0627860233cb..3e0040c0ac87 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -29,6 +29,9 @@
* @hw: handle between common and hardware-specific interfaces
* @reg: IO-remapped register
* @div: divisor value (1-64)
+ * @src_shift: Shift to access the register bits to select the parent clock
+ * @src_width: Number of register bits to select the parent clock (may be 0)
+ * @parents: Array mapping valid parent clock indices to hardware indices
*/
struct div6_clock {
struct clk_hw hw;
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index f1617dd044cb..500a9e4e03c4 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -335,7 +335,7 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
u32 ncells;
if (of_property_read_u32(np, "#power-domain-cells", &ncells)) {
- pr_warn("%s lacks #power-domain-cells\n", np->full_name);
+ pr_warn("%pOF lacks #power-domain-cells\n", np);
return;
}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 51a2479ed5d7..0b2e56d0d94b 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -407,8 +407,7 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
if (rcar_rst_read_mode_pins(&cpg_mode)) {
/* Backward-compatibility with old DT */
- pr_warn("%s: failed to obtain mode pins from RST\n",
- np->full_name);
+ pr_warn("%pOF: failed to obtain mode pins from RST\n", np);
cpg_mode = rcar_gen2_read_mode_pins();
}
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index a832b9b6f7b0..7f85bbf20bf7 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -118,6 +118,13 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
DEF_MOD("vin1", 810, R8A7792_CLK_ZG),
DEF_MOD("vin0", 811, R8A7792_CLK_ZG),
DEF_MOD("etheravb", 812, R8A7792_CLK_HP),
+ DEF_MOD("imr-lx3", 821, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-1", 822, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-0", 823, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-5", 825, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-4", 826, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-3", 827, R8A7792_CLK_ZG),
+ DEF_MOD("imr-lsx3-2", 828, R8A7792_CLK_ZG),
DEF_MOD("gyro-adc", 901, R8A7792_CLK_P),
DEF_MOD("gpio7", 904, R8A7792_CLK_CP),
DEF_MOD("gpio6", 905, R8A7792_CLK_CP),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index c091a8e024b8..762b2f8824f1 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -305,23 +305,23 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
(((md) & BIT(17)) >> 17))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
- /* EXTAL div PLL1 mult PLL3 mult */
- { 1, 192, 192, },
- { 1, 192, 128, },
- { 0, /* Prohibited setting */ },
- { 1, 192, 192, },
- { 1, 160, 160, },
- { 1, 160, 106, },
- { 0, /* Prohibited setting */ },
- { 1, 160, 160, },
- { 1, 128, 128, },
- { 1, 128, 84, },
- { 0, /* Prohibited setting */ },
- { 1, 128, 128, },
- { 2, 192, 192, },
- { 2, 192, 128, },
- { 0, /* Prohibited setting */ },
- { 2, 192, 192, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 160, 1, 106, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, },
+ { 1, 128, 1, 128, 1, },
+ { 1, 128, 1, 84, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, },
+ { 2, 192, 1, 192, 1, },
+ { 2, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, },
};
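
Each reworked table row now carries an explicit divider after each
multiplier, so a PLL output can be derived as extal / extal_div * mult /
div. A quick standalone sketch of that arithmetic using the first row
(the EXTAL frequency is a hypothetical example, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* First table row: EXTAL div = 1, PLL1 mult/div = 192/1. */
		unsigned long extal = 16666666;	/* hypothetical 16.66 MHz EXTAL */
		unsigned long extal_div = 1, pll1_mult = 192, pll1_div = 1;

		/* PLL1 = EXTAL / extal_div * mult / div */
		printf("PLL1: %lu Hz\n",
		       extal / extal_div * pll1_mult / pll1_div); /* ~3.2 GHz */
		return 0;
	}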
static const struct soc_device_attribute r8a7795es1[] __initconst = {
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index acc6d0f153e1..e5e7fb212288 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -138,6 +138,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("sdif0", 314, R8A7796_CLK_SD0),
DEF_MOD("pcie1", 318, R8A7796_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7796_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A7796_CLK_S3D1),
DEF_MOD("usb-dmac0", 330, R8A7796_CLK_S3D1),
DEF_MOD("usb-dmac1", 331, R8A7796_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
@@ -277,23 +278,23 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
(((md) & BIT(17)) >> 17))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
- /* EXTAL div PLL1 mult PLL3 mult */
- { 1, 192, 192, },
- { 1, 192, 128, },
- { 0, /* Prohibited setting */ },
- { 1, 192, 192, },
- { 1, 160, 160, },
- { 1, 160, 106, },
- { 0, /* Prohibited setting */ },
- { 1, 160, 160, },
- { 1, 128, 128, },
- { 1, 128, 84, },
- { 0, /* Prohibited setting */ },
- { 1, 128, 128, },
- { 2, 192, 192, },
- { 2, 192, 128, },
- { 0, /* Prohibited setting */ },
- { 2, 192, 192, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 160, 1, 106, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, },
+ { 1, 128, 1, 128, 1, },
+ { 1, 128, 1, 84, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, },
+ { 2, 192, 1, 192, 1, },
+ { 2, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, },
};
static int __init r8a7796_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
new file mode 100644
index 000000000000..e594cf8ee63b
--- /dev/null
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -0,0 +1,236 @@
+/*
+ * r8a77995 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Glider bvba
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77995-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77995_CLK_CP,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL0D2,
+ CLK_PLL0D3,
+ CLK_PLL0D5,
+ CLK_PLL1D2,
+ CLK_PE,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_SSPSRC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 4, 250),
+ DEF_FIXED(".pll0d2", CLK_PLL0D2, CLK_PLL0, 2, 1),
+ DEF_FIXED(".pll0d3", CLK_PLL0D3, CLK_PLL0, 3, 1),
+ DEF_FIXED(".pll0d5", CLK_PLL0D5, CLK_PLL0, 5, 1),
+ DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pe", CLK_PE, CLK_PLL0D3, 4, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
+ DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
+ DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),
+ DEF_FIXED("zx", R8A77995_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("s0d1", R8A77995_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s1d1", R8A77995_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77995_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77995_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77995_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77995_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77995_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77995_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77995_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77995_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("osc", R8A77995_CLK_OSC, CLK_EXTAL, 384, 1),
+ DEF_FIXED("r", R8A77995_CLK_R, CLK_EXTAL, 1536, 1),
+
+ DEF_GEN3_PE("s1d4c", R8A77995_CLK_S1D4C, CLK_S1, 4, CLK_PE, 2),
+ DEF_GEN3_PE("s3d1c", R8A77995_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1),
+ DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
+ DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
+
+ DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, CLK_SDSRC, 0x268),
+
+ DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244),
+ DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014),
+};
+
+static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A77995_CLK_S3D4C),
+ DEF_MOD("scif4", 203, R8A77995_CLK_S3D4C),
+ DEF_MOD("scif3", 204, R8A77995_CLK_S3D4C),
+ DEF_MOD("scif1", 206, R8A77995_CLK_S3D4C),
+ DEF_MOD("scif0", 207, R8A77995_CLK_S3D4C),
+ DEF_MOD("msiof3", 208, R8A77995_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77995_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77995_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77995_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A77995_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A77995_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A77995_CLK_S3D1),
+ DEF_MOD("cmt3", 300, R8A77995_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77995_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77995_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77995_CLK_R),
+ DEF_MOD("scif2", 310, R8A77995_CLK_S3D4C),
+ DEF_MOD("emmc0", 312, R8A77995_CLK_SD0),
+ DEF_MOD("usb-dmac0", 330, R8A77995_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A77995_CLK_S3D1),
+ DEF_MOD("rwdt", 402, R8A77995_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77995_CLK_S3D1),
+ DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
+ DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
+ DEF_MOD("thermal", 522, R8A77995_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77995_CLK_S3D4C),
+ DEF_MOD("fcpvd1", 602, R8A77995_CLK_S1D2),
+ DEF_MOD("fcpvd0", 603, R8A77995_CLK_S1D2),
+ DEF_MOD("fcpvbs", 607, R8A77995_CLK_S0D1),
+ DEF_MOD("vspd1", 622, R8A77995_CLK_S1D2),
+ DEF_MOD("vspd0", 623, R8A77995_CLK_S1D2),
+ DEF_MOD("vspbs", 627, R8A77995_CLK_S0D1),
+ DEF_MOD("ehci0", 703, R8A77995_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A77995_CLK_S3D2),
+ DEF_MOD("du1", 723, R8A77995_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A77995_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77995_CLK_S2D1),
+ DEF_MOD("vin7", 804, R8A77995_CLK_S1D2),
+ DEF_MOD("vin6", 805, R8A77995_CLK_S1D2),
+ DEF_MOD("vin5", 806, R8A77995_CLK_S1D2),
+ DEF_MOD("vin4", 807, R8A77995_CLK_S1D2),
+ DEF_MOD("etheravb", 812, R8A77995_CLK_S3D2),
+ DEF_MOD("imr0", 823, R8A77995_CLK_S1D2),
+ DEF_MOD("gpio6", 906, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A77995_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A77995_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A77995_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A77995_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A77995_CLK_S3D4),
+ DEF_MOD("i2c3", 928, R8A77995_CLK_S3D2),
+ DEF_MOD("i2c2", 929, R8A77995_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77995_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77995_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A77995_CLK_S3D4),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A77995_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a77995_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD19 EXTAL (MHz) PLL0 PLL1 PLL3
+ *--------------------------------------------------------------------
+ * 0 48 x 1 x250/4 x100/3 x100/3
+ * 1 48 x 1 x250/4 x100/3 x116/6
+ */
+#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19)
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 100, 3, 100, 3, },
+ { 1, 100, 3, 116, 6, },
+};
+
+static int __init r8a77995_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, 0, cpg_mode);
+}
+
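(Editor's note, not part of the patch: the MD19 mode pin picks one of the two
PLL configurations above. A minimal sketch of the lookup, using a made-up
mode-pin value:)

	/* MD19 set: (BIT(19) & BIT(19)) >> 19 == 1, the x116/6 PLL3 entry */
	u32 cpg_mode = BIT(19);
	const struct rcar_gen3_cpg_pll_config *cfg =
		&cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
	/* cfg->pll3_mult == 116, cfg->pll3_div == 6 */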
+const struct cpg_mssr_info r8a77995_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77995_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77995_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77995_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77995_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77995_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77995_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77995_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 3dee900522b7..951105816547 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -60,6 +60,7 @@ struct sd_clock {
unsigned int div_num;
unsigned int div_min;
unsigned int div_max;
+ unsigned int cur_div_idx;
};
/* SDn divider
@@ -96,21 +97,10 @@ static const struct sd_div_table cpg_sd_div_table[] = {
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- u32 val, sd_fc;
- unsigned int i;
-
- val = readl(clock->reg);
-
- sd_fc = val & CPG_SD_FC_MASK;
- for (i = 0; i < clock->div_num; i++)
- if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
+ u32 val = readl(clock->reg);
val &= ~(CPG_SD_STP_MASK);
- val |= clock->div_table[i].val & CPG_SD_STP_MASK;
+ val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
writel(val, clock->reg);
@@ -135,21 +125,9 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sd_clock *clock = to_sd_clock(hw);
- unsigned long rate = parent_rate;
- u32 val, sd_fc;
- unsigned int i;
- val = readl(clock->reg);
-
- sd_fc = val & CPG_SD_FC_MASK;
- for (i = 0; i < clock->div_num; i++)
- if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div);
+ return DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[clock->cur_div_idx].div);
}
static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
@@ -190,6 +168,8 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
if (i >= clock->div_num)
return -EINVAL;
+ clock->cur_div_idx = i;
+
val = readl(clock->reg);
val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
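(Editor's note: the hunks in this file replace a register scan on every
enable/recalc with a divider index that is read once at registration and kept
in sync by set_rate. A minimal sketch of the pattern, with a hypothetical
lookup_div_index() standing in for the CPG_SD_FC_MASK scan:)

	/* registration: parse the hardware state once */
	clock->cur_div_idx = lookup_div_index(readl(clock->reg));

	/* hot paths (enable, recalc_rate): use the cache, no register scan */
	div = clock->div_table[clock->cur_div_idx].div;

	/* set_rate: update the hardware and the cache together */
	clock->cur_div_idx = i;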
@@ -215,6 +195,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
struct sd_clock *clock;
struct clk *clk;
unsigned int i;
+ u32 sd_fc;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
if (!clock)
@@ -231,6 +212,18 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+ sd_fc = readl(clock->reg) & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (WARN_ON(i >= clock->div_num)) {
+ kfree(clock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ clock->cur_div_idx = i;
+
clock->div_max = clock->div_table[0].div;
clock->div_min = clock->div_max;
for (i = 1; i < clock->div_num; i++) {
@@ -279,7 +272,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
unsigned int div = 1;
u32 value;
- parent = clks[core->parent];
+ parent = clks[core->parent & 0xffff]; /* CLK_TYPE_PE uses high bits */
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -303,6 +296,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_PLL1:
mult = cpg_pll_config->pll1_mult;
+ div = cpg_pll_config->pll1_div;
break;
case CLK_TYPE_GEN3_PLL2:
@@ -320,6 +314,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_PLL3:
mult = cpg_pll_config->pll3_mult;
+ div = cpg_pll_config->pll3_div;
break;
case CLK_TYPE_GEN3_PLL4:
@@ -360,6 +355,24 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
parent = clks[cpg_clk_extalr];
break;
+ case CLK_TYPE_GEN3_PE:
+ /*
+ * Peripheral clock with a fixed divider, selectable between
+ * clean and spread spectrum parents using MD12
+ */
+ if (cpg_mode & BIT(12)) {
+ /* Clean */
+ div = core->div & 0xffff;
+ } else {
+ /* SCCG */
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div >> 16;
+ }
+ mult = 1;
+ break;
+
default:
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 073be54b5d03..d756ef8b78eb 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -20,15 +20,24 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_PLL4,
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
+ CLK_TYPE_GEN3_PE,
};
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
+#define DEF_GEN3_PE(_name, _id, _parent_sscg, _div_sscg, _parent_clean, \
+ _div_clean) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_PE, \
+ (_parent_sscg) << 16 | (_parent_clean), \
+ .div = (_div_sscg) << 16 | (_div_clean))
+
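(Editor's note, not part of the patch: DEF_GEN3_PE() packs both parents and
both dividers into single fields, which the CLK_TYPE_GEN3_PE case in
rcar-gen3-cpg.c unpacks according to MD12. A sketch of the round trip for the
r8a77995 "s1d4c" entry, DEF_GEN3_PE("s1d4c", ..., CLK_S1, 4, CLK_PE, 2):)

	/* stored: parent = CLK_S1 << 16 | CLK_PE, div = 4 << 16 | 2 */
	if (cpg_mode & BIT(12)) {
		/* clean: parent = clks[core->parent & 0xffff] (CLK_PE) */
		div = core->div & 0xffff;	/* 2 */
	} else {
		/* SCCG: parent = clks[core->parent >> 16] (CLK_S1) */
		div = core->div >> 16;		/* 4 */
	}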
struct rcar_gen3_cpg_pll_config {
- unsigned int extal_div;
- unsigned int pll1_mult;
- unsigned int pll3_mult;
+ u8 extal_div;
+ u8 pll1_mult;
+ u8 pll1_div;
+ u8 pll3_mult;
+ u8 pll3_div;
};
#define CPG_RCKCR 0x240
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
new file mode 100644
index 000000000000..6cd030a58964
--- /dev/null
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -0,0 +1,188 @@
+/*
+ * Renesas R-Car USB2.0 clock selector
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * Based on renesas-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#define USB20_CLKSET0 0x00
+#define CLKSET0_INTCLK_EN BIT(11)
+#define CLKSET0_PRIVATE BIT(0)
+#define CLKSET0_EXTAL_ONLY (CLKSET0_INTCLK_EN | CLKSET0_PRIVATE)
+
+struct usb2_clock_sel_priv {
+ void __iomem *base;
+ struct clk_hw hw;
+ bool extal;
+ bool xtal;
+};
+#define to_priv(_hw) container_of(_hw, struct usb2_clock_sel_priv, hw)
+
+static void usb2_clock_sel_enable_extal_only(struct usb2_clock_sel_priv *priv)
+{
+ u16 val = readw(priv->base + USB20_CLKSET0);
+
+ pr_debug("%s: enter %d %d %x\n", __func__,
+ priv->extal, priv->xtal, val);
+
+ if (priv->extal && !priv->xtal && val != CLKSET0_EXTAL_ONLY)
+ writew(CLKSET0_EXTAL_ONLY, priv->base + USB20_CLKSET0);
+}
+
+static void usb2_clock_sel_disable_extal_only(struct usb2_clock_sel_priv *priv)
+{
+ if (priv->extal && !priv->xtal)
+ writew(CLKSET0_PRIVATE, priv->base + USB20_CLKSET0);
+}
+
+static int usb2_clock_sel_enable(struct clk_hw *hw)
+{
+ usb2_clock_sel_enable_extal_only(to_priv(hw));
+
+ return 0;
+}
+
+static void usb2_clock_sel_disable(struct clk_hw *hw)
+{
+ usb2_clock_sel_disable_extal_only(to_priv(hw));
+}
+
+/*
+ * This hardware block is really a mux, but this driver models it as a
+ * gate because the ehci/ohci platform drivers do not support
+ * clk_set_parent() for now. Treating it as a gate means the
+ * ehci/ohci-platform drivers need no modification.
+ */
+static const struct clk_ops usb2_clock_sel_clock_ops = {
+ .enable = usb2_clock_sel_enable,
+ .disable = usb2_clock_sel_disable,
+};
+
+static const struct of_device_id rcar_usb2_clock_sel_match[] = {
+ { .compatible = "renesas,rcar-gen3-usb2-clock-sel" },
+ { }
+};
+
+static int rcar_usb2_clock_sel_suspend(struct device *dev)
+{
+ struct usb2_clock_sel_priv *priv = dev_get_drvdata(dev);
+
+ usb2_clock_sel_disable_extal_only(priv);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_resume(struct device *dev)
+{
+ struct usb2_clock_sel_priv *priv = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ usb2_clock_sel_enable_extal_only(priv);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb2_clock_sel_priv *priv = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(dev->of_node);
+ clk_hw_unregister(&priv->hw);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct usb2_clock_sel_priv *priv;
+ struct resource *res;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ clk = devm_clk_get(dev, "usb_extal");
+ if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+ priv->extal = !!clk_get_rate(clk);
+ clk_disable_unprepare(clk);
+ }
+ clk = devm_clk_get(dev, "usb_xtal");
+ if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+ priv->xtal = !!clk_get_rate(clk);
+ clk_disable_unprepare(clk);
+ }
+
+ if (!priv->extal && !priv->xtal) {
+ dev_err(dev, "This driver needs usb_extal or usb_xtal\n");
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ return -ENOENT;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ init.name = "rcar_usb2_clock_sel";
+ init.ops = &usb2_clock_sel_clock_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ priv->hw.init = &init;
+
+ clk = clk_register(NULL, &priv->hw);
+ if (IS_ERR(clk)) {
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ return PTR_ERR(clk);
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
+}
+
+static const struct dev_pm_ops rcar_usb2_clock_sel_pm_ops = {
+ .suspend = rcar_usb2_clock_sel_suspend,
+ .resume = rcar_usb2_clock_sel_resume,
+};
+
+static struct platform_driver rcar_usb2_clock_sel_driver = {
+ .driver = {
+ .name = "rcar-usb2-clock-sel",
+ .of_match_table = rcar_usb2_clock_sel_match,
+ .pm = &rcar_usb2_clock_sel_pm_ops,
+ },
+ .probe = rcar_usb2_clock_sel_probe,
+ .remove = rcar_usb2_clock_sel_remove,
+};
+builtin_platform_driver(rcar_usb2_clock_sel_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car USB2 clock selector driver");
+MODULE_LICENSE("GPL v2");
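(Editor's note: because the selector is exposed as a plain gate, a consumer
such as ehci/ohci-platform needs nothing beyond the generic clock API. A
hedged sketch, assuming sel_clk was obtained with devm_clk_get():)

	ret = clk_prepare_enable(sel_clk);	/* forces EXTAL-only, if applicable */
	if (ret)
		return ret;
	/* ... use the controller ... */
	clk_disable_unprepare(sel_clk);		/* back to CLKSET0_PRIVATE */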
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1f607c806f9b..e580a5e6346c 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -680,6 +680,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77995
+ {
+ .compatible = "renesas,r8a77995-cpg-mssr",
+ .data = &r8a77995_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 43d7c7f6832d..94b9071d1061 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -138,6 +138,7 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
/*
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index e243f2eae68f..62d7854e4b87 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -201,7 +201,7 @@ static struct rockchip_clk_branch rk3128_uart2_fracmux __initdata =
MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
-static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
+static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/*
* Clock-Architecture Diagram 1
*/
@@ -459,10 +459,6 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(2), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(10), 15, GFLAGS),
- COMPOSITE(SCLK_SFC, "sclk_sfc", mux_sclk_sfc_src_p, 0,
- RK2928_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS,
- RK2928_CLKGATE_CON(3), 15, GFLAGS),
-
COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "cpll", 0,
RK2928_CLKSEL_CON(29), 8, 6, DFLAGS,
RK2928_CLKGATE_CON(1), 0, GFLAGS),
@@ -495,7 +491,6 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
GATE(ACLK_DMAC, "aclk_dmac", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 15, GFLAGS),
GATE(0, "aclk_cpu_to_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
- GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
@@ -541,7 +536,6 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
GATE(0, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 6, GFLAGS),
GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
- GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS),
GATE(PCLK_ACODEC, "pclk_acodec", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
GATE(0, "pclk_ddrupctl", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 7, GFLAGS),
GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
@@ -561,6 +555,21 @@ static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
};
+static struct rockchip_clk_branch rk3126_clk_branches[] __initdata = {
+ GATE(0, "pclk_stimer", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(0, "pclk_s_efuse", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 8, GFLAGS),
+};
+
+static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_sclk_sfc_src_p, 0,
+ RK2928_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 15, GFLAGS),
+
+ GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+ GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS),
+};
+
static const char *const rk3128_critical_clocks[] __initconst = {
"aclk_cpu",
"hclk_cpu",
@@ -570,7 +579,7 @@ static const char *const rk3128_critical_clocks[] __initconst = {
"pclk_peri",
};
-static void __init rk3128_clk_init(struct device_node *np)
+static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
@@ -578,23 +587,21 @@ static void __init rk3128_clk_init(struct device_node *np)
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
- return;
+ return ERR_PTR(-ENOMEM);
}
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
- return;
+ return ERR_CAST(ctx);
}
rockchip_clk_register_plls(ctx, rk3128_pll_clks,
ARRAY_SIZE(rk3128_pll_clks),
RK3128_GRF_SOC_STATUS0);
- rockchip_clk_register_branches(ctx, rk3128_clk_branches,
- ARRAY_SIZE(rk3128_clk_branches));
- rockchip_clk_protect_critical(rk3128_critical_clocks,
- ARRAY_SIZE(rk3128_critical_clocks));
+ rockchip_clk_register_branches(ctx, common_clk_branches,
+ ARRAY_SIZE(common_clk_branches));
rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
@@ -606,6 +613,40 @@ static void __init rk3128_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL);
+ return ctx;
+}
+
+static void __init rk3126_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+
+ ctx = rk3128_common_clk_init(np);
+ if (IS_ERR(ctx))
+ return;
+
+ rockchip_clk_register_branches(ctx, rk3126_clk_branches,
+ ARRAY_SIZE(rk3126_clk_branches));
+ rockchip_clk_protect_critical(rk3128_critical_clocks,
+ ARRAY_SIZE(rk3128_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3126_cru, "rockchip,rk3126-cru", rk3126_clk_init);
+
+static void __init rk3128_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+
+ ctx = rk3128_common_clk_init(np);
+ if (IS_ERR(ctx))
+ return;
+
+ rockchip_clk_register_branches(ctx, rk3128_clk_branches,
+ ARRAY_SIZE(rk3128_clk_branches));
+ rockchip_clk_protect_critical(rk3128_critical_clocks,
+ ARRAY_SIZE(rk3128_critical_clocks));
+
rockchip_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index bb405d9044a3..11e7f2d1c054 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -391,7 +391,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
- COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
+ COMPOSITE_NODIV(SCLK_SDIO_SRC, "sclk_sdio_src", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 10, 2, MFLAGS,
RK2928_CLKGATE_CON(2), 13, GFLAGS),
DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c
index 7c05ab366348..089cb17925e5 100644
--- a/drivers/clk/rockchip/clk-rv1108.c
+++ b/drivers/clk/rockchip/clk-rv1108.c
@@ -93,9 +93,24 @@ static struct rockchip_pll_rate_table rv1108_pll_rates[] = {
}
static struct rockchip_cpuclk_rate_table rv1108_cpuclk_rates[] __initdata = {
- RV1108_CPUCLK_RATE(816000000, 4),
- RV1108_CPUCLK_RATE(600000000, 4),
- RV1108_CPUCLK_RATE(312000000, 4),
+ RV1108_CPUCLK_RATE(1608000000, 7),
+ RV1108_CPUCLK_RATE(1512000000, 7),
+ RV1108_CPUCLK_RATE(1488000000, 5),
+ RV1108_CPUCLK_RATE(1416000000, 5),
+ RV1108_CPUCLK_RATE(1392000000, 5),
+ RV1108_CPUCLK_RATE(1296000000, 5),
+ RV1108_CPUCLK_RATE(1200000000, 5),
+ RV1108_CPUCLK_RATE(1104000000, 5),
+ RV1108_CPUCLK_RATE(1008000000, 5),
+ RV1108_CPUCLK_RATE(912000000, 5),
+ RV1108_CPUCLK_RATE(816000000, 3),
+ RV1108_CPUCLK_RATE(696000000, 3),
+ RV1108_CPUCLK_RATE(600000000, 3),
+ RV1108_CPUCLK_RATE(500000000, 3),
+ RV1108_CPUCLK_RATE(408000000, 1),
+ RV1108_CPUCLK_RATE(312000000, 1),
+ RV1108_CPUCLK_RATE(216000000, 1),
+ RV1108_CPUCLK_RATE(96000000, 1),
};
static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = {
@@ -105,7 +120,7 @@ static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = {
.mux_core_alt = 1,
.mux_core_main = 0,
.mux_core_shift = 8,
- .mux_core_mask = 0x1,
+ .mux_core_mask = 0x3,
};
PNAME(mux_pll_p) = { "xin24m", "xin24m"};
@@ -114,30 +129,42 @@ PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" };
PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" };
PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" };
PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" };
-PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" };
+PNAME(mux_pll_src_4plls_p) = { "dpll", "gpll", "hdmiphy", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" };
PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" };
PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" };
-PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" };
+PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_gpll", "aclk_peri_src_dpll" };
PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" };
PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" };
PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" };
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
-PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" };
+PNAME(mux_sclk_mac_p) = { "sclk_mac_pre", "ext_gmac" };
PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" };
-PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" };
-PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
+PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "dummy", "xin12m" };
+PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "dummy", "xin12m" };
+PNAME(mux_wifi_src_p) = { "gpll", "xin24m" };
+PNAME(mux_cifout_src_p) = { "hdmiphy", "gpll" };
+PNAME(mux_cifout_p) = { "sclk_cifout_src", "xin24m" };
+PNAME(mux_sclk_cif0_src_p) = { "pclk_vip", "clk_cif0_chn_out", "pclkin_cvbs2cif" };
+PNAME(mux_sclk_cif1_src_p) = { "pclk_vip", "clk_cif1_chn_out", "pclkin_cvbs2cif" };
+PNAME(mux_sclk_cif2_src_p) = { "pclk_vip", "clk_cif2_chn_out", "pclkin_cvbs2cif" };
+PNAME(mux_sclk_cif3_src_p) = { "pclk_vip", "clk_cif3_chn_out", "pclkin_cvbs2cif" };
+PNAME(mux_dsp_src_p) = { "dpll", "gpll", "apll", "usb480m" };
+PNAME(mux_dclk_hdmiphy_p) = { "hdmiphy", "xin24m" };
+PNAME(mux_dclk_vop_p) = { "dclk_hdmiphy", "dclk_vop_src" };
+PNAME(mux_hdmi_cec_src_p) = { "dpll", "gpll", "xin24m" };
+PNAME(mux_cvbs_src_p) = { "apll", "io_cvbs_clkin", "hdmiphy", "gpll" };
static struct rockchip_pll_clock rv1108_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RV1108_PLL_CON(0),
- RV1108_PLL_CON(3), 8, 31, 0, rv1108_pll_rates),
+ RV1108_PLL_CON(3), 8, 0, 0, rv1108_pll_rates),
[dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RV1108_PLL_CON(8),
- RV1108_PLL_CON(11), 8, 31, 0, NULL),
+ RV1108_PLL_CON(11), 8, 1, 0, NULL),
[gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RV1108_PLL_CON(16),
- RV1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rv1108_pll_rates),
+ RV1108_PLL_CON(19), 8, 2, 0, rv1108_pll_rates),
};
#define MFLAGS CLK_MUX_HIWORD_MASK
@@ -170,10 +197,10 @@ static struct rockchip_clk_branch rv1108_i2s2_fracmux __initdata =
RV1108_CLKSEL_CON(7), 12, 2, MFLAGS);
static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
- MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT,
- RV1108_MISC_CON, 13, 2, MFLAGS),
+ MUX(0, "hdmiphy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT,
+ RV1108_MISC_CON, 13, 1, MFLAGS),
MUX(0, "usb480m", mux_usb480m_pre_p, CLK_SET_RATE_PARENT,
- RV1108_MISC_CON, 15, 2, MFLAGS),
+ RV1108_MISC_CON, 15, 1, MFLAGS),
/*
* Clock-Architecture Diagram 2
*/
@@ -197,50 +224,212 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKGATE_CON(11), 1, GFLAGS),
/* PD_RKVENC */
+ COMPOSITE(0, "aclk_rkvenc_pre", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(37), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 8, GFLAGS),
+ FACTOR_GATE(0, "hclk_rkvenc_pre", "aclk_rkvenc_pre", 0, 1, 4,
+ RV1108_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(SCLK_VENC_CORE, "clk_venc_core", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(37), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(ACLK_RKVENC, "aclk_rkvenc", "aclk_rkvenc_pre", 0,
+ RV1108_CLKGATE_CON(19), 8, GFLAGS),
+ GATE(HCLK_RKVENC, "hclk_rkvenc", "hclk_rkvenc_pre", 0,
+ RV1108_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(19), 11, GFLAGS),
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(19), 10, GFLAGS),
/* PD_RKVDEC */
+ COMPOSITE(SCLK_HEVC_CORE, "sclk_hevc_core", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(36), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 2, GFLAGS),
+ FACTOR_GATE(0, "hclk_rkvdec_pre", "sclk_hevc_core", 0, 1, 4,
+ RV1108_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(SCLK_HEVC_CABAC, "clk_hevc_cabac", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 1, GFLAGS),
+
+ COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(36), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(8), 3, GFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", 0,
+ RV1108_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0,
+ RV1108_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", 0,
+ RV1108_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_rkvdec_pre", 0,
+ RV1108_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(19), 5, GFLAGS),
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(19), 6, GFLAGS),
/* PD_PMU_wrapper */
COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(38), 0, 5, DFLAGS,
RV1108_CLKGATE_CON(8), 12, GFLAGS),
- GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(10), 0, GFLAGS),
- GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(10), 1, GFLAGS),
- GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pmu_24m_ena", 0,
RV1108_CLKGATE_CON(10), 2, GFLAGS),
- GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(10), 3, GFLAGS),
- GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_pmu_niu", "pmu_24m_ena", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(10), 4, GFLAGS),
- GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pmu_24m_ena", 0,
RV1108_CLKGATE_CON(10), 5, GFLAGS),
- GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED,
+ GATE(PCLK_PWM0_PMU, "pclk_pwm0_pmu", "pmu_24m_ena", 0,
RV1108_CLKGATE_CON(10), 6, GFLAGS),
- COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(SCLK_PWM0_PMU, "sclk_pwm0_pmu", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(8), 15, GFLAGS),
- COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(SCLK_I2C0_PMU, "sclk_i2c0_pmu", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(8), 14, GFLAGS),
GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(8), 13, GFLAGS),
/*
+ * Clock-Architecture Diagram 3
+ */
+ COMPOSITE(SCLK_WIFI, "sclk_wifi", mux_wifi_src_p, 0,
+ RV1108_CLKSEL_CON(28), 15, 1, MFLAGS, 8, 6, DFLAGS,
+ RV1108_CLKGATE_CON(9), 8, GFLAGS),
+ COMPOSITE_NODIV(0, "sclk_cifout_src", mux_cifout_src_p, 0,
+ RV1108_CLKSEL_CON(40), 8, 1, MFLAGS,
+ RV1108_CLKGATE_CON(9), 11, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_CIFOUT, "sclk_cifout", mux_cifout_p, 0,
+ RV1108_CLKSEL_CON(40), 12, 1, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(SCLK_MIPI_CSI_OUT, "sclk_mipi_csi_out", "xin24m", 0,
+ RV1108_CLKSEL_CON(41), 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 12, GFLAGS),
+
+ GATE(0, "pclk_acodecphy", "pclk_top_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(0, "pclk_usbgrf", "pclk_top_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 14, GFLAGS),
+
+ GATE(ACLK_CIF0, "aclk_cif0", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(HCLK_CIF0, "hclk_cif0", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 10, GFLAGS),
+ COMPOSITE_NODIV(SCLK_CIF0, "sclk_cif0", mux_sclk_cif0_src_p, 0,
+ RV1108_CLKSEL_CON(31), 0, 2, MFLAGS,
+ RV1108_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(ACLK_CIF1, "aclk_cif1", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(HCLK_CIF1, "hclk_cif1", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(17), 7, GFLAGS),
+ COMPOSITE_NODIV(SCLK_CIF1, "sclk_cif1", mux_sclk_cif1_src_p, 0,
+ RV1108_CLKSEL_CON(31), 2, 2, MFLAGS,
+ RV1108_CLKGATE_CON(7), 10, GFLAGS),
+ GATE(ACLK_CIF2, "aclk_cif2", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(HCLK_CIF2, "hclk_cif2", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(17), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_CIF2, "sclk_cif2", mux_sclk_cif2_src_p, 0,
+ RV1108_CLKSEL_CON(31), 4, 2, MFLAGS,
+ RV1108_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(ACLK_CIF3, "aclk_cif3", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(HCLK_CIF3, "hclk_cif3", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(17), 11, GFLAGS),
+ COMPOSITE_NODIV(SCLK_CIF3, "sclk_cif3", mux_sclk_cif3_src_p, 0,
+ RV1108_CLKSEL_CON(31), 6, 2, MFLAGS,
+ RV1108_CLKGATE_CON(7), 12, GFLAGS),
+ GATE(0, "pclk_cif1to4", "pclk_vip", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(7), 8, GFLAGS),
+
+ /* PD_DSP_wrapper */
+ COMPOSITE(SCLK_DSP, "sclk_dsp", mux_dsp_src_p, 0,
+ RV1108_CLKSEL_CON(42), 8, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 0, GFLAGS),
+ GATE(0, "clk_dsp_sys_wd", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(0, "clk_dsp_epp_wd", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(0, "clk_dsp_edp_wd", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 2, GFLAGS),
+ GATE(0, "clk_dsp_iop_wd", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 3, GFLAGS),
+ GATE(0, "clk_dsp_free", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 13, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_DSP_IOP, "sclk_dsp_iop", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(44), 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 1, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_DSP_EPP, "sclk_dsp_epp", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(44), 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 2, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_DSP_EDP, "sclk_dsp_edp", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(45), 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 3, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_DSP_EDAP, "sclk_dsp_edap", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(45), 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 4, GFLAGS),
+ GATE(0, "pclk_dsp_iop_niu", "sclk_dsp_iop", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(0, "aclk_dsp_epp_niu", "sclk_dsp_epp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(0, "aclk_dsp_edp_niu", "sclk_dsp_edp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(0, "pclk_dsp_dbg_niu", "sclk_dsp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(0, "aclk_dsp_edap_niu", "sclk_dsp_edap", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 14, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_DSP_PFM, "sclk_dsp_pfm", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(43), 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 5, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_DSP_CFG, "pclk_dsp_cfg", "sclk_dsp", 0,
+ RV1108_CLKSEL_CON(43), 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(0, "pclk_dsp_cfg_niu", "pclk_dsp_cfg", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(0, "pclk_dsp_pfm_mon", "pclk_dsp_cfg", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 9, GFLAGS),
+ GATE(0, "pclk_intc", "pclk_dsp_cfg", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 10, GFLAGS),
+ GATE(0, "pclk_dsp_grf", "pclk_dsp_cfg", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(0, "pclk_mailbox", "pclk_dsp_cfg", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 12, GFLAGS),
+ GATE(0, "aclk_dsp_epp_perf", "sclk_dsp_epp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(16), 15, GFLAGS),
+ GATE(0, "aclk_dsp_edp_perf", "sclk_dsp_edp", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(11), 8, GFLAGS),
+
+ /*
* Clock-Architecture Diagram 4
*/
- COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(0, "aclk_vio0_pre", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
RV1108_CLKGATE_CON(6), 0, GFLAGS),
- GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED,
+ GATE(ACLK_VIO0, "aclk_vio0", "aclk_vio0_pre", 0,
RV1108_CLKGATE_CON(17), 0, GFLAGS),
COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0,
RV1108_CLKSEL_CON(29), 0, 5, DFLAGS,
RV1108_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(HCLK_VIO, "hclk_vio", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(17), 2, GFLAGS),
COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0,
RV1108_CLKSEL_CON(29), 8, 5, DFLAGS,
RV1108_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_VIO, "pclk_vio", "pclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(17), 3, GFLAGS),
+ COMPOSITE(0, "aclk_vio1_pre", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED,
+ RV1108_CLKSEL_CON(28), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(ACLK_VIO1, "aclk_vio1", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(17), 1, GFLAGS),
INVERTER(0, "pclk_vip", "ext_vip",
RV1108_CLKSEL_CON(31), 8, IFLAGS),
@@ -252,8 +441,63 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKGATE_CON(6), 5, GFLAGS),
GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(6), 4, GFLAGS),
- COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0,
- RV1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS),
+ COMPOSITE_NOGATE(0, "dclk_hdmiphy_pre", mux_dclk_hdmiphy_pre_p, 0,
+ RV1108_CLKSEL_CON(32), 6, 1, MFLAGS, 8, 6, DFLAGS),
+ COMPOSITE_NOGATE(DCLK_VOP_SRC, "dclk_vop_src", mux_dclk_hdmiphy_pre_p, 0,
+ RV1108_CLKSEL_CON(32), 6, 1, MFLAGS, 0, 6, DFLAGS),
+ MUX(DCLK_HDMIPHY, "dclk_hdmiphy", mux_dclk_hdmiphy_p, CLK_SET_RATE_PARENT,
+ RV1108_CLKSEL_CON(32), 15, 1, MFLAGS),
+ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT,
+ RV1108_CLKSEL_CON(32), 7, 1, MFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vio0_pre", 0,
+ RV1108_CLKGATE_CON(18), 0, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(ACLK_IEP, "aclk_iep", "aclk_vio0_pre", 0,
+ RV1108_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 3, GFLAGS),
+
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(18), 4, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 5, GFLAGS),
+ COMPOSITE(SCLK_RGA, "sclk_rga", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(33), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(6), 6, GFLAGS),
+
+ COMPOSITE(SCLK_CVBS_HOST, "sclk_cvbs_host", mux_cvbs_src_p, 0,
+ RV1108_CLKSEL_CON(33), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(6), 7, GFLAGS),
+ FACTOR(0, "sclk_cvbs_27m", "sclk_cvbs_host", 0, 1, 2),
+
+ GATE(SCLK_HDMI_SFR, "sclk_hdmi_sfr", "xin24m", 0,
+ RV1108_CLKGATE_CON(6), 8, GFLAGS),
+
+ COMPOSITE(SCLK_HDMI_CEC, "sclk_hdmi_cec", mux_hdmi_cec_src_p, 0,
+ RV1108_CLKSEL_CON(34), 14, 2, MFLAGS, 0, 14, DFLAGS,
+ RV1108_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 8, GFLAGS),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 9, GFLAGS),
+
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vio1_pre", 0,
+ RV1108_CLKGATE_CON(18), 12, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vio_pre", 0,
+ RV1108_CLKGATE_CON(18), 11, GFLAGS),
+ COMPOSITE(SCLK_ISP, "sclk_isp", mux_pll_src_4plls_p, 0,
+ RV1108_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(6), 3, GFLAGS),
+
+ GATE(0, "clk_dsiphy24m", "xin24m", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(9), 10, GFLAGS),
+ GATE(0, "pclk_vdacphy", "pclk_top_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(0, "pclk_mipi_dsiphy", "pclk_top_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(0, "pclk_mipi_csiphy", "pclk_top_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 12, GFLAGS),
/*
* Clock-Architecture Diagram 5
@@ -261,10 +505,11 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
- COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
+
+ COMPOSITE(SCLK_I2S0_SRC, "i2s0_src", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(2), 0, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT,
RV1108_CLKSEL_CON(8), 0,
RV1108_CLKGATE_CON(2), 1, GFLAGS,
&rv1108_i2s0_fracmux),
@@ -274,7 +519,7 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKSEL_CON(5), 15, 1, MFLAGS,
RV1108_CLKGATE_CON(2), 3, GFLAGS),
- COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_I2S1_SRC, "i2s1_src", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
@@ -284,7 +529,7 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
RV1108_CLKGATE_CON(2), 6, GFLAGS),
- COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_I2S2_SRC, "i2s2_src", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 8, GFLAGS),
COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT,
@@ -303,32 +548,53 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKGATE_CON(1), 2, GFLAGS),
COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0,
RV1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS),
- COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0,
+ COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus_pre", "aclk_bus_pre", 0,
RV1108_CLKSEL_CON(3), 0, 5, DFLAGS,
RV1108_CLKGATE_CON(1), 4, GFLAGS),
- COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0,
+ COMPOSITE_NOMUX(0, "pclk_bus_pre", "aclk_bus_pre", 0,
RV1108_CLKSEL_CON(3), 8, 5, DFLAGS,
RV1108_CLKGATE_CON(1), 5, GFLAGS),
- GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ GATE(PCLK_BUS, "pclk_bus", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(1), 6, GFLAGS),
- GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_top_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(1), 7, GFLAGS),
- GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_ddr_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(1), 8, GFLAGS),
- GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED,
+ GATE(SCLK_TIMER0, "clk_timer0", "xin24m", 0,
RV1108_CLKGATE_CON(1), 9, GFLAGS),
- GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED,
+ GATE(SCLK_TIMER1, "clk_timer1", "xin24m", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(1), 10, GFLAGS),
- GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(13), 4, GFLAGS),
- COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 7, GFLAGS),
+ GATE(HCLK_I2S1_2CH, "hclk_i2s1_2ch", "hclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 8, GFLAGS),
+ GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 9, GFLAGS),
+
+ GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 10, GFLAGS),
+ GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "sclk_crypto", mux_pll_src_2plls_p, 0,
+ RV1108_CLKSEL_CON(11), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1108_CLKGATE_CON(2), 12, GFLAGS),
+
+ COMPOSITE(SCLK_SPI, "sclk_spi", mux_pll_src_2plls_p, 0,
+ RV1108_CLKSEL_CON(11), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(PCLK_SPI, "pclk_spi", "pclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(13), 5, GFLAGS),
+
+ COMPOSITE(SCLK_UART0_SRC, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 1, GFLAGS),
- COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(SCLK_UART1_SRC, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 3, GFLAGS),
- COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(SCLK_UART2_SRC, "uart2_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 5, GFLAGS),
@@ -344,44 +610,58 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKSEL_CON(18), 0,
RV1108_CLKGATE_CON(3), 6, GFLAGS,
&rv1108_uart2_fracmux),
- GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 10, GFLAGS),
- GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 11, GFLAGS),
- GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 12, GFLAGS),
- COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
- RV1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_pll_src_2plls_p, 0,
+ RV1108_CLKSEL_CON(19), 15, 1, MFLAGS, 8, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 7, GFLAGS),
- COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
- RV1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_pll_src_2plls_p, 0,
+ RV1108_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 8, GFLAGS),
- COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
- RV1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS,
+ COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_pll_src_2plls_p, 0,
+ RV1108_CLKSEL_CON(20), 15, 1, MFLAGS, 8, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 9, GFLAGS),
- GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 0, GFLAGS),
- GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 1, GFLAGS),
- GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 2, GFLAGS),
- COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(SCLK_PWM, "clk_pwm", mux_pll_src_2plls_p, 0,
RV1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS,
RV1108_CLKGATE_CON(3), 10, GFLAGS),
- GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_PWM, "pclk_pwm", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 6, GFLAGS),
- GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 3, GFLAGS),
- GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 7, GFLAGS),
- GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 8, GFLAGS),
- GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_pre", 0,
RV1108_CLKGATE_CON(13), 9, GFLAGS),
GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_EFUSE0, "pclk_efuse0", "pclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 12, GFLAGS),
+ GATE(PCLK_EFUSE1, "pclk_efuse1", "pclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(12), 13, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(13), 13, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
+ RV1108_CLKSEL_CON(21), 0, 10, DFLAGS,
+ RV1108_CLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_pre", 0,
+ RV1108_CLKGATE_CON(13), 14, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+ RV1108_CLKSEL_CON(22), 0, 10, DFLAGS,
+ RV1108_CLKGATE_CON(3), 12, GFLAGS),
GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0,
RV1108_CLKGATE_CON(12), 2, GFLAGS),
@@ -397,18 +677,24 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
RV1108_CLKGATE_CON(0), 9, GFLAGS),
GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(0), 10, GFLAGS),
- COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ COMPOSITE_NOGATE(0, "clk_ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RV1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3,
- DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ FACTOR(0, "clk_ddr", "clk_ddrphy_src", 0, 1, 2),
+ GATE(0, "clk_ddrphy4x", "clk_ddr", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(10), 9, GFLAGS),
- GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_ddrupctl", "pclk_ddr_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(12), 4, GFLAGS),
- GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED,
+ GATE(0, "nclk_ddrupctl", "clk_ddr", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(12), 5, GFLAGS),
- GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_ddrmon", "pclk_ddr_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(12), 6, GFLAGS),
GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(0, "pclk_mschniu", "pclk_ddr_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(0, "pclk_ddrphy", "pclk_ddr_pre", CLK_IGNORE_UNUSED,
+ RV1108_CLKGATE_CON(14), 4, GFLAGS),
/*
* Clock-Architecture Diagram 6
@@ -418,23 +704,23 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0,
RV1108_CLKSEL_CON(23), 10, 5, DFLAGS,
RV1108_CLKGATE_CON(4), 5, GFLAGS),
- GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED,
+ GATE(PCLK_PERI, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(15), 13, GFLAGS),
COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0,
RV1108_CLKSEL_CON(23), 5, 5, DFLAGS,
RV1108_CLKGATE_CON(4), 4, GFLAGS),
- GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED,
+ GATE(HCLK_PERI, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(15), 12, GFLAGS),
GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(4), 1, GFLAGS),
GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED,
RV1108_CLKGATE_CON(4), 2, GFLAGS),
- COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED,
- RV1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS,
+ COMPOSITE(ACLK_PERI, "aclk_periph", mux_aclk_peri_src_p, 0,
+ RV1108_CLKSEL_CON(23), 15, 1, MFLAGS, 0, 5, DFLAGS,
RV1108_CLKGATE_CON(15), 11, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RV1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS,
RV1108_CLKGATE_CON(5), 0, GFLAGS),
@@ -454,23 +740,31 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 2, GFLAGS),
COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0,
- RV1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1108_CLKSEL_CON(27), 14, 1, MFLAGS, 8, 5, DFLAGS,
RV1108_CLKGATE_CON(5), 3, GFLAGS),
GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(HCLK_HOST0, "hclk_host0", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 6, GFLAGS),
+ GATE(0, "hclk_host0_arb", "hclk_periph", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 8, GFLAGS),
+ GATE(0, "hclk_otg_pmu", "hclk_periph", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(15), 9, GFLAGS),
+ GATE(SCLK_USBPHY, "clk_usbphy", "xin24m", CLK_IGNORE_UNUSED, RV1108_CLKGATE_CON(5), 5, GFLAGS),
+
COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0,
- RV1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1108_CLKSEL_CON(27), 7, 1, MFLAGS, 0, 7, DFLAGS,
RV1108_CLKGATE_CON(5), 4, GFLAGS),
GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 10, GFLAGS),
- COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0,
- RV1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS,
+ COMPOSITE(SCLK_MAC_PRE, "sclk_mac_pre", mux_pll_src_apll_gpll_p, 0,
+ RV1108_CLKSEL_CON(24), 12, 1, MFLAGS, 0, 5, DFLAGS,
RV1108_CLKGATE_CON(4), 10, GFLAGS),
- MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT,
- RV1108_CLKSEL_CON(24), 8, 2, MFLAGS),
- GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS),
- GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS),
- GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS),
+ MUX(SCLK_MAC, "sclk_mac", mux_sclk_mac_p, CLK_SET_RATE_PARENT,
+ RV1108_CLKSEL_CON(24), 8, 1, MFLAGS),
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(SCLK_MAC_REF, "sclk_mac_ref", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(SCLK_MAC_REFOUT, "sclk_mac_refout", "sclk_mac", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_periph", 0, RV1108_CLKGATE_CON(15), 4, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_periph", 0, RV1108_CLKGATE_CON(15), 5, GFLAGS),
MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RV1108_SDMMC_CON0, 1),
MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RV1108_SDMMC_CON1, 1),
@@ -484,10 +778,16 @@ static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = {
static const char *const rv1108_critical_clocks[] __initconst = {
"aclk_core",
- "aclk_bus_src_gpll",
+ "aclk_bus",
+ "hclk_bus",
+ "pclk_bus",
"aclk_periph",
"hclk_periph",
"pclk_periph",
+ "nclk_ddrupctl",
+ "pclk_ddrmon",
+ "pclk_acodecphy",
+ "pclk_pmu",
};
static void __init rv1108_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index fe1d393cf678..35dbd63c2f49 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -29,6 +29,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
+#include <linux/rational.h>
#include "clk.h"
/**
@@ -164,6 +165,40 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(ret);
}
+/*
+ * The fractional divider requires the denominator to be at least 20 times
+ * larger than the numerator in order to generate a precise clock frequency.
+ */
+static void rockchip_fractional_approximation(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n)
+{
+ struct clk_fractional_divider *fd = to_clk_fd(hw);
+ unsigned long p_rate, p_parent_rate;
+ struct clk_hw *p_parent;
+ unsigned long scale;
+
+ p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+ if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
+ p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
+ p_parent_rate = clk_hw_get_rate(p_parent);
+ *parent_rate = p_parent_rate;
+ }
+
+ /*
+ * Scale the rate up towards *parent_rate to guarantee that m and n
+ * cannot overflow their bit fields. The result is the nearest rate
+ * left-shifted by (scale - fd->nwidth) bits.
+ */
+ scale = fls_long(*parent_rate / rate - 1);
+ if (scale > fd->nwidth)
+ rate <<= scale - fd->nwidth;
+
+ rational_best_approximation(rate, *parent_rate,
+ GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
+ m, n);
+}
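(Editor's note, not part of the patch: a worked example with assumed rates.
For a hypothetical 1188 MHz parent and a 48 kHz target, 48000 * 20 is far
below the parent rate, so no reparenting happens, and fls_long(24750 - 1)
== 15 <= nwidth, so no scaling is needed either:)

	unsigned long m, n;

	rational_best_approximation(48000, 1188000000,
				    GENMASK(15, 0), GENMASK(15, 0), &m, &n);
	/*
	 * m = 1, n = 24750: 1188000000 * 1 / 24750 = 48000 Hz exactly,
	 * and the denominator is well beyond 20x the numerator.
	 */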
+
static struct clk *rockchip_clk_register_frac_branch(
struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
@@ -210,6 +245,7 @@ static struct clk *rockchip_clk_register_frac_branch(
div->nwidth = 16;
div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
div->lock = lock;
+ div->approximation = rockchip_fractional_approximation;
div_ops = &clk_fractional_divider_ops;
clk = clk_register_composite(NULL, name, parent_names, num_parents,
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 1fab56f396d4..b117783ed404 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -180,7 +180,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
}
clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
- CLK_SET_RATE_NO_REPARENT,
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
cdclk = devm_clk_get(&pdev->dev, "cdclk");
@@ -195,11 +195,11 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp",
- "mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
- 0, &lock);
+ "mout_audss", CLK_SET_RATE_PARENT,
+ reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
- "dout_aud_bus", "dout_srp", 0,
+ "dout_aud_bus", "dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s",
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 9a6476aa7d81..25601967d1cd 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -537,8 +537,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
mout_mx_mspll_ccore_p, SRC_TOP7, 16, 2),
- MUX(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p,
- SRC_TOP7, 20, 2),
+ MUX_F(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p,
+ SRC_TOP7, 20, 2, CLK_SET_RATE_PARENT, 0),
MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1),
MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1),
@@ -547,8 +547,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(0, "mout_aclk432_cam", mout_group6_5800_p, SRC_TOP8, 24, 2),
MUX(0, "mout_aclk432_scaler", mout_group6_5800_p, SRC_TOP8, 28, 2),
- MUX(CLK_MOUT_USER_MAU_EPLL, "mout_user_mau_epll", mout_group16_5800_p,
- SRC_TOP9, 8, 1),
+ MUX_F(CLK_MOUT_USER_MAU_EPLL, "mout_user_mau_epll", mout_group16_5800_p,
+ SRC_TOP9, 8, 1, CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_user_aclk550_cam", mout_group15_5800_p,
SRC_TOP9, 16, 1),
MUX(0, "mout_user_aclkfl1_550_cam", mout_group13_5800_p,
@@ -590,6 +590,8 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -629,6 +631,11 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
"mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3),
};
+static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+};
+
static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p,
SRC_TOP7, 4, 1),
@@ -706,7 +713,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1),
MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1),
MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1),
- MUX(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1),
+ MUX_F(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_sclk_dpll", mout_dpll_p, SRC_TOP6, 24, 1),
MUX(0, "mout_sclk_cpll", mout_cpll_p, SRC_TOP6, 28, 1),
@@ -1001,9 +1009,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
- GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
- SRC_MASK_TOP7, 20, 0, 0),
-
/* sclk */
GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0",
GATE_TOP_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0),
@@ -1440,6 +1445,8 @@ static void __init exynos5x_clk_init(struct device_node *np,
ARRAY_SIZE(exynos5420_mux_clks));
samsung_clk_register_div(ctx, exynos5420_div_clks,
ARRAY_SIZE(exynos5420_div_clks));
+ samsung_clk_register_gate(ctx, exynos5420_gate_clks,
+ ARRAY_SIZE(exynos5420_gate_clks));
} else {
samsung_clk_register_fixed_factor(
ctx, exynos5800_fixed_factor_clks,
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 7342928c35cd..6427d0ebe2de 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -11,6 +11,19 @@ config SUN50I_A64_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN4I_A10_CCU
+ bool "Support for the Allwinner A10/A20 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_MULT
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default MACH_SUN4I || MACH_SUN7I
+ depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
+
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
default MACH_SUN5I
@@ -48,6 +61,11 @@ config SUN8I_V3S_CCU
config SUN8I_DE2_CCU
bool "Support for the Allwinner SoCs DE2 CCU"
+config SUN8I_R40_CCU
+ bool "Support for the Allwinner R40 CCU"
+ default MACH_SUN8I
+ depends on MACH_SUN8I || COMPILE_TEST
+
config SUN9I_A80_CCU
bool "Support for the Allwinner A80 CCU"
default MACH_SUN9I
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 0c45fa50283d..85a0633c1eac 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,5 +1,6 @@
# Common objects
lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
# Base clock types
@@ -19,6 +20,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
+obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o
obj-$(CONFIG_SUN8I_DE2_CCU) += ccu-sun8i-de2.o
obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o
+obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
new file mode 100644
index 000000000000..286b0049b7b6
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -0,0 +1,1456 @@
+/*
+ * Copyright (c) 2017 Priit Laes <plaes@plaes.org>.
+ * Copyright (c) 2017 Maxime Ripard.
+ * Copyright (c) 2017 Jonathan Liu.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun4i-a10.h"
+
+static struct ccu_nkmp pll_core_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV(16, 2),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-core",
+ "hosc",
+ &ccu_nkmp_ops,
+ 0),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUN4I_PLL_AUDIO_REG 0x008
+static struct ccu_nm pll_audio_base_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
+ .m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+ .common = {
+ .reg = 0x008,
+ .hw.init = CLK_HW_INIT("pll-audio-base",
+ "hosc",
+ &ccu_nm_ops,
+ 0),
+ },
+};
+
+static struct ccu_mult pll_video0_clk = {
+ .enable = BIT(31),
+ .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127),
+ .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14),
+ 270000000, 297000000),
+ .common = {
+ .reg = 0x010,
+ .features = (CCU_FEATURE_FRACTIONAL |
+ CCU_FEATURE_ALL_PREDIV),
+ .prediv = 8,
+ .hw.init = CLK_HW_INIT("pll-video0",
+ "hosc",
+ &ccu_mult_ops,
+ 0),
+ },
+};
+
+static struct ccu_nkmp pll_ve_sun4i_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV(16, 2),
+ .common = {
+ .reg = 0x018,
+ .hw.init = CLK_HW_INIT("pll-ve",
+ "hosc",
+ &ccu_nkmp_ops,
+ 0),
+ },
+};
+
+static struct ccu_nk pll_ve_sun7i_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .common = {
+ .reg = 0x018,
+ .hw.init = CLK_HW_INIT("pll-ve",
+ "hosc",
+ &ccu_nk_ops,
+ 0),
+ },
+};
+
+static struct ccu_nk pll_ddr_base_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT("pll-ddr-base",
+ "hosc",
+ &ccu_nk_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_M(pll_ddr_clk, "pll-ddr", "pll-ddr-base", 0x020, 0, 2,
+ CLK_IS_CRITICAL);
+
+static struct ccu_div pll_ddr_other_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(16, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT("pll-ddr-other", "pll-ddr-base",
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_nk pll_periph_base_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .common = {
+ .reg = 0x028,
+ .hw.init = CLK_HW_INIT("pll-periph-base",
+ "hosc",
+ &ccu_nk_ops,
+ 0),
+ },
+};
+
+static CLK_FIXED_FACTOR(pll_periph_clk, "pll-periph", "pll-periph-base",
+ 2, 1, CLK_SET_RATE_PARENT);
+
+/* Not documented on A10 */
+static struct ccu_div pll_periph_sata_clk = {
+ .enable = BIT(14),
+ .div = _SUNXI_CCU_DIV(0, 2),
+ .fixed_post_div = 6,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph-sata",
+ "pll-periph-base",
+ &ccu_div_ops, 0),
+ },
+};
+
+static struct ccu_mult pll_video1_clk = {
+ .enable = BIT(31),
+ .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(0, 7, 0, 9, 127),
+ .frac = _SUNXI_CCU_FRAC(BIT(15), BIT(14),
+ 270000000, 297000000),
+ .common = {
+ .reg = 0x030,
+ .features = (CCU_FEATURE_FRACTIONAL |
+ CCU_FEATURE_ALL_PREDIV),
+ .prediv = 8,
+ .hw.init = CLK_HW_INIT("pll-video1",
+ "hosc",
+ &ccu_mult_ops,
+ 0),
+ },
+};
+
+/* Not present on A10 */
+static struct ccu_nk pll_gpu_clk = {
+ .enable = BIT(31),
+ .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT("pll-gpu",
+ "hosc",
+ &ccu_nk_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(hosc_clk, "hosc", "osc24M", 0x050, BIT(0), 0);
+
+static const char *const cpu_parents[] = { "osc32k", "hosc",
+ "pll-core", "pll-periph" };
+static const struct ccu_mux_fixed_prediv cpu_predivs[] = {
+ { .index = 3, .div = 3, },
+};
+
+#define SUN4I_AHB_REG 0x054
+static struct ccu_mux cpu_clk = {
+ .mux = {
+ .shift = 16,
+ .width = 2,
+ .fixed_predivs = cpu_predivs,
+ .n_predivs = ARRAY_SIZE(cpu_predivs),
+ },
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("cpu",
+ cpu_parents,
+ &ccu_mux_ops,
+ CLK_IS_CRITICAL),
+ }
+};
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x054, 0, 2, 0);
+
+static struct ccu_div ahb_sun4i_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .common = {
+ .reg = 0x054,
+ .hw.init = CLK_HW_INIT("ahb", "axi", &ccu_div_ops, 0),
+ },
+};
+
+static const char *const ahb_sun7i_parents[] = { "axi", "pll-periph",
+ "pll-periph" };
+static const struct ccu_mux_fixed_prediv ahb_sun7i_predivs[] = {
+ { .index = 1, .div = 2, },
+ { /* Sentinel */ },
+};
+static struct ccu_div ahb_sun7i_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = {
+ .shift = 6,
+ .width = 2,
+ .fixed_predivs = ahb_sun7i_predivs,
+ .n_predivs = ARRAY_SIZE(ahb_sun7i_predivs),
+ },
+
+ .common = {
+ .reg = 0x054,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb",
+ ahb_sun7i_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb0_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb0_clk, "apb0", "ahb",
+ 0x054, 8, 2, apb0_div_table, 0);
+
+static const char *const apb1_parents[] = { "hosc", "pll-periph", "osc32k" };
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", apb1_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+/* Not present on A20 */
+static SUNXI_CCU_GATE(axi_dram_clk, "axi-dram", "ahb",
+ 0x05c, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ahb_otg_clk, "ahb-otg", "ahb",
+ 0x060, BIT(0), 0);
+static SUNXI_CCU_GATE(ahb_ehci0_clk, "ahb-ehci0", "ahb",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(ahb_ohci0_clk, "ahb-ohci0", "ahb",
+ 0x060, BIT(2), 0);
+static SUNXI_CCU_GATE(ahb_ehci1_clk, "ahb-ehci1", "ahb",
+ 0x060, BIT(3), 0);
+static SUNXI_CCU_GATE(ahb_ohci1_clk, "ahb-ohci1", "ahb",
+ 0x060, BIT(4), 0);
+static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb",
+ 0x060, BIT(7), 0);
+static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(ahb_mmc2_clk, "ahb-mmc2", "ahb",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(ahb_mmc3_clk, "ahb-mmc3", "ahb",
+ 0x060, BIT(11), 0);
+static SUNXI_CCU_GATE(ahb_ms_clk, "ahb-ms", "ahb",
+ 0x060, BIT(12), 0);
+static SUNXI_CCU_GATE(ahb_nand_clk, "ahb-nand", "ahb",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(ahb_sdram_clk, "ahb-sdram", "ahb",
+ 0x060, BIT(14), CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(ahb_ace_clk, "ahb-ace", "ahb",
+ 0x060, BIT(16), 0);
+static SUNXI_CCU_GATE(ahb_emac_clk, "ahb-emac", "ahb",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(ahb_ts_clk, "ahb-ts", "ahb",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(ahb_spi0_clk, "ahb-spi0", "ahb",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(ahb_spi1_clk, "ahb-spi1", "ahb",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(ahb_spi2_clk, "ahb-spi2", "ahb",
+ 0x060, BIT(22), 0);
+static SUNXI_CCU_GATE(ahb_spi3_clk, "ahb-spi3", "ahb",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(ahb_pata_clk, "ahb-pata", "ahb",
+ 0x060, BIT(24), 0);
+/* Not documented on A20 */
+static SUNXI_CCU_GATE(ahb_sata_clk, "ahb-sata", "ahb",
+ 0x060, BIT(25), 0);
+/* Not present on A20 */
+static SUNXI_CCU_GATE(ahb_gps_clk, "ahb-gps", "ahb",
+ 0x060, BIT(26), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(ahb_hstimer_clk, "ahb-hstimer", "ahb",
+ 0x060, BIT(28), 0);
+
+static SUNXI_CCU_GATE(ahb_ve_clk, "ahb-ve", "ahb",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(ahb_tvd_clk, "ahb-tvd", "ahb",
+ 0x064, BIT(1), 0);
+static SUNXI_CCU_GATE(ahb_tve0_clk, "ahb-tve0", "ahb",
+ 0x064, BIT(2), 0);
+static SUNXI_CCU_GATE(ahb_tve1_clk, "ahb-tve1", "ahb",
+ 0x064, BIT(3), 0);
+static SUNXI_CCU_GATE(ahb_lcd0_clk, "ahb-lcd0", "ahb",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(ahb_lcd1_clk, "ahb-lcd1", "ahb",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(ahb_csi0_clk, "ahb-csi0", "ahb",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(ahb_csi1_clk, "ahb-csi1", "ahb",
+ 0x064, BIT(9), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(ahb_hdmi1_clk, "ahb-hdmi1", "ahb",
+ 0x064, BIT(10), 0);
+static SUNXI_CCU_GATE(ahb_hdmi0_clk, "ahb-hdmi0", "ahb",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(ahb_de_be0_clk, "ahb-de-be0", "ahb",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(ahb_de_be1_clk, "ahb-de-be1", "ahb",
+ 0x064, BIT(13), 0);
+static SUNXI_CCU_GATE(ahb_de_fe0_clk, "ahb-de-fe0", "ahb",
+ 0x064, BIT(14), 0);
+static SUNXI_CCU_GATE(ahb_de_fe1_clk, "ahb-de-fe1", "ahb",
+ 0x064, BIT(15), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(ahb_gmac_clk, "ahb-gmac", "ahb",
+ 0x064, BIT(17), 0);
+static SUNXI_CCU_GATE(ahb_mp_clk, "ahb-mp", "ahb",
+ 0x064, BIT(18), 0);
+static SUNXI_CCU_GATE(ahb_gpu_clk, "ahb-gpu", "ahb",
+ 0x064, BIT(20), 0);
+
+static SUNXI_CCU_GATE(apb0_codec_clk, "apb0-codec", "apb0",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(apb0_spdif_clk, "apb0-spdif", "apb0",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(apb0_ac97_clk, "apb0-ac97", "apb0",
+ 0x068, BIT(2), 0);
+static SUNXI_CCU_GATE(apb0_i2s0_clk, "apb0-i2s0", "apb0",
+ 0x068, BIT(3), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(apb0_i2s1_clk, "apb0-i2s1", "apb0",
+ 0x068, BIT(4), 0);
+static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(apb0_ir0_clk, "apb0-ir0", "apb0",
+ 0x068, BIT(6), 0);
+static SUNXI_CCU_GATE(apb0_ir1_clk, "apb0-ir1", "apb0",
+ 0x068, BIT(7), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(apb0_i2s2_clk, "apb0-i2s2", "apb0",
+ 0x068, BIT(8), 0);
+static SUNXI_CCU_GATE(apb0_keypad_clk, "apb0-keypad", "apb0",
+ 0x068, BIT(10), 0);
+
+static SUNXI_CCU_GATE(apb1_i2c0_clk, "apb1-i2c0", "apb1",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(apb1_i2c1_clk, "apb1-i2c1", "apb1",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(apb1_i2c2_clk, "apb1-i2c2", "apb1",
+ 0x06c, BIT(2), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(apb1_i2c3_clk, "apb1-i2c3", "apb1",
+ 0x06c, BIT(3), 0);
+static SUNXI_CCU_GATE(apb1_can_clk, "apb1-can", "apb1",
+ 0x06c, BIT(4), 0);
+static SUNXI_CCU_GATE(apb1_scr_clk, "apb1-scr", "apb1",
+ 0x06c, BIT(5), 0);
+static SUNXI_CCU_GATE(apb1_ps20_clk, "apb1-ps20", "apb1",
+ 0x06c, BIT(6), 0);
+static SUNXI_CCU_GATE(apb1_ps21_clk, "apb1-ps21", "apb1",
+ 0x06c, BIT(7), 0);
+/* Not present on A10 */
+static SUNXI_CCU_GATE(apb1_i2c4_clk, "apb1-i2c4", "apb1",
+ 0x06c, BIT(15), 0);
+static SUNXI_CCU_GATE(apb1_uart0_clk, "apb1-uart0", "apb1",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(apb1_uart1_clk, "apb1-uart1", "apb1",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(apb1_uart2_clk, "apb1-uart2", "apb1",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(apb1_uart3_clk, "apb1-uart3", "apb1",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(apb1_uart4_clk, "apb1-uart4", "apb1",
+ 0x06c, BIT(20), 0);
+static SUNXI_CCU_GATE(apb1_uart5_clk, "apb1-uart5", "apb1",
+ 0x06c, BIT(21), 0);
+static SUNXI_CCU_GATE(apb1_uart6_clk, "apb1-uart6", "apb1",
+ 0x06c, BIT(22), 0);
+static SUNXI_CCU_GATE(apb1_uart7_clk, "apb1-uart7", "apb1",
+ 0x06c, BIT(23), 0);
+
+static const char *const mod0_default_parents[] = { "hosc", "pll-periph",
+ "pll-ddr-other" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* Undocumented on A10 */
+static SUNXI_CCU_MP_WITH_MUX_GATE(ms_clk, "ms", mod0_default_parents, 0x084,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* MMC output and sample clocks are not present on A10 */
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* MMC output and sample clocks are not present on A10 */
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* MMC output and sample clocks are not present on A10 */
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2",
+ 0x090, 8, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2",
+ 0x090, 20, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents, 0x094,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* MMC output and sample clocks are not present on A10 */
+static SUNXI_CCU_PHASE(mmc3_output_clk, "mmc3_output", "mmc3",
+ 0x094, 8, 3, 0);
+static SUNXI_CCU_PHASE(mmc3_sample_clk, "mmc3_sample", "mmc3",
+ 0x094, 20, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", mod0_default_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* Undocumented on A10 */
+static SUNXI_CCU_MP_WITH_MUX_GATE(pata_clk, "pata", mod0_default_parents, 0x0ac,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* TODO: check whether the A10 actually supports osc32k as a 4th parent */
+static const char *const ir_parents_sun4i[] = { "hosc", "pll-periph",
+ "pll-ddr-other" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_sun4i_clk, "ir0", ir_parents_sun4i, 0x0b0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_sun4i_clk, "ir1", ir_parents_sun4i, 0x0b4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char *const ir_parents_sun7i[] = { "hosc", "pll-periph",
+ "pll-ddr-other", "osc32k" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_sun7i_clk, "ir0", ir_parents_sun7i, 0x0b0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_sun7i_clk, "ir1", ir_parents_sun7i, 0x0b4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char *const audio_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", audio_parents,
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(ac97_clk, "ac97", audio_parents,
+ 0x0bc, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+/* Undocumented on A10 */
+static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", audio_parents,
+ 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char *const keypad_parents[] = { "hosc", "losc" };
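+/* The mux table maps hosc to register value 0 and losc to value 2 */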
+static const u8 keypad_table[] = { 0, 2 };
+static struct ccu_mp keypad_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(0, 5),
+ .p = _SUNXI_CCU_DIV(16, 2),
+ .mux = _SUNXI_CCU_MUX_TABLE(24, 2, keypad_table),
+ .common = {
+ .reg = 0x0c4,
+ .hw.init = CLK_HW_INIT_PARENTS("keypad",
+ keypad_parents,
+ &ccu_mp_ops,
+ 0),
+ },
+};
+
+/*
+ * SATA supports an external clock as parent via BIT(24); this is
+ * probably an optional crystal or oscillator that can be connected to
+ * the SATA-CLKM / SATA-CLKP pins.
+ */
+static const char *const sata_parents[] = { "pll-periph-sata", "sata-ext" };
+static SUNXI_CCU_MUX_WITH_GATE(sata_clk, "sata", sata_parents,
+ 0x0c8, 24, 1, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "pll-periph",
+ 0x0cc, BIT(6), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "pll-periph",
+ 0x0cc, BIT(7), 0);
+static SUNXI_CCU_GATE(usb_phy_clk, "usb-phy", "pll-periph",
+ 0x0cc, BIT(8), 0);
+
+/* TODO: GPS CLK 0x0d0 */
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, 0x0d4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* Not present on A10 */
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", audio_parents,
+ 0x0d8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+/* Not present on A10 */
+static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", audio_parents,
+ 0x0dc, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi0_clk, "dram-csi0", "pll-ddr",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_csi1_clk, "dram-csi1", "pll-ddr",
+ 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "pll-ddr",
+ 0x100, BIT(3), 0);
+static SUNXI_CCU_GATE(dram_tvd_clk, "dram-tvd", "pll-ddr",
+ 0x100, BIT(4), 0);
+static SUNXI_CCU_GATE(dram_tve0_clk, "dram-tve0", "pll-ddr",
+ 0x100, BIT(5), 0);
+static SUNXI_CCU_GATE(dram_tve1_clk, "dram-tve1", "pll-ddr",
+ 0x100, BIT(6), 0);
+
+/* Clock seems to be critical only on sun4i */
+static SUNXI_CCU_GATE(dram_out_clk, "dram-out", "pll-ddr",
+ 0x100, BIT(15), CLK_IS_CRITICAL);
+static SUNXI_CCU_GATE(dram_de_fe1_clk, "dram-de-fe1", "pll-ddr",
+ 0x100, BIT(24), 0);
+static SUNXI_CCU_GATE(dram_de_fe0_clk, "dram-de-fe0", "pll-ddr",
+ 0x100, BIT(25), 0);
+static SUNXI_CCU_GATE(dram_de_be0_clk, "dram-de-be0", "pll-ddr",
+ 0x100, BIT(26), 0);
+static SUNXI_CCU_GATE(dram_de_be1_clk, "dram-de-be1", "pll-ddr",
+ 0x100, BIT(27), 0);
+static SUNXI_CCU_GATE(dram_mp_clk, "dram-mp", "pll-ddr",
+ 0x100, BIT(28), 0);
+static SUNXI_CCU_GATE(dram_ace_clk, "dram-ace", "pll-ddr",
+ 0x100, BIT(29), 0);
+
+static const char *const de_parents[] = { "pll-video0", "pll-video1",
+ "pll-ddr-other" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_be0_clk, "de-be0", de_parents,
+ 0x104, 0, 4, 24, 2, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(de_be1_clk, "de-be1", de_parents,
+ 0x108, 0, 4, 24, 2, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(de_fe0_clk, "de-fe0", de_parents,
+ 0x10c, 0, 4, 24, 2, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(de_fe1_clk, "de-fe1", de_parents,
+ 0x110, 0, 4, 24, 2, BIT(31), 0);
+
+/* Undocumented on A10 */
+static SUNXI_CCU_M_WITH_MUX_GATE(de_mp_clk, "de-mp", de_parents,
+ 0x114, 0, 4, 24, 2, BIT(31), 0);
+
+static const char *const disp_parents[] = { "pll-video0", "pll-video1",
+ "pll-video0-2x", "pll-video1-2x" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon0_ch0_clk, "tcon0-ch0-sclk", disp_parents,
+ 0x118, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(tcon1_ch0_clk, "tcon1-ch0-sclk", disp_parents,
+ 0x11c, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char *const csi_sclk_parents[] = { "pll-video0", "pll-ve",
+ "pll-ddr-other", "pll-periph" };
+
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk",
+ csi_sclk_parents,
+ 0x120, 0, 4, 24, 2, BIT(31), 0);
+
+/* TVD clock setup for A10 */
+static const char *const tvd_parents[] = { "pll-video0", "pll-video1" };
+static SUNXI_CCU_MUX_WITH_GATE(tvd_sun4i_clk, "tvd", tvd_parents,
+ 0x128, 24, 1, BIT(31), 0);
+
+/* TVD clock setup for A20 */
+static SUNXI_CCU_MP_WITH_MUX_GATE(tvd_sclk2_sun7i_clk,
+ "tvd-sclk2", tvd_parents,
+ 0x128,
+ 0, 4, /* M */
+ 16, 4, /* P */
+ 8, 1, /* mux */
+ BIT(15), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_GATE(tvd_sclk1_sun7i_clk, "tvd-sclk1", "tvd-sclk2",
+ 0x128, 0, 4, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(tcon0_ch1_sclk2_clk, "tcon0-ch1-sclk2",
+ disp_parents,
+ 0x12c, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(tcon0_ch1_clk,
+ "tcon0-ch1-sclk1", "tcon0-ch1-sclk2",
+ 0x12c, 11, 1, BIT(15),
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_ch1_sclk2_clk, "tcon1-ch1-sclk2",
+ disp_parents,
+ 0x130, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(tcon1_ch1_clk,
+ "tcon1-ch1-sclk1", "tcon1-ch1-sclk2",
+ 0x130, 11, 1, BIT(15),
+ CLK_SET_RATE_PARENT);
+
+static const char *const csi_parents[] = { "hosc", "pll-video0", "pll-video1",
+ "pll-video0-2x", "pll-video1-2x"};
+static const u8 csi_table[] = { 0, 1, 2, 5, 6};
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi0_clk, "csi0",
+ csi_parents, csi_table,
+ 0x134, 0, 5, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi1_clk, "csi1",
+ csi_parents, csi_table,
+ 0x138, 0, 5, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", 0x13c, 16, 8, BIT(31), 0);
+
+static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "hosc", 0x144, BIT(31), 0);
+
+static const char *const ace_parents[] = { "pll-ve", "pll-ddr-other" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ace_clk, "ace", ace_parents,
+ 0x148, 0, 4, 24, 1, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", disp_parents,
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char *const gpu_parents_sun4i[] = { "pll-video0", "pll-ve",
+ "pll-ddr-other",
+ "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu_sun4i_clk, "gpu", gpu_parents_sun4i,
+ 0x154, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char *const gpu_parents_sun7i[] = { "pll-video0", "pll-ve",
+ "pll-ddr-other", "pll-video1",
+ "pll-gpu" };
+static const u8 gpu_table_sun7i[] = { 0, 1, 2, 3, 4 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(gpu_sun7i_clk, "gpu",
+ gpu_parents_sun7i, gpu_table_sun7i,
+ 0x154, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char *const mbus_sun4i_parents[] = { "hosc", "pll-periph",
+ "pll-ddr-other" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_sun4i_clk, "mbus", mbus_sun4i_parents,
+ 0x15c, 0, 4, 16, 2, 24, 2, BIT(31),
+ 0);
+static const char *const mbus_sun7i_parents[] = { "hosc", "pll-periph-base",
+ "pll-ddr-other" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_sun7i_clk, "mbus", mbus_sun7i_parents,
+ 0x15c, 0, 4, 16, 2, 24, 2, BIT(31),
+ CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(hdmi1_slow_clk, "hdmi1-slow", "hosc", 0x178, BIT(31), 0);
+
+static const char *const hdmi1_parents[] = { "pll-video0", "pll-video1" };
+static const u8 hdmi1_table[] = { 0, 1 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(hdmi1_clk, "hdmi1",
+ hdmi1_parents, hdmi1_table,
+ 0x17c, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char *const out_parents[] = { "hosc", "osc32k", "hosc" };
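+/* hosc (24 MHz) divided by the fixed 750 prediv yields a 32 kHz reference */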
+static const struct ccu_mux_fixed_prediv clk_out_predivs[] = {
+ { .index = 0, .div = 750, },
+};
+
+static struct ccu_mp out_a_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = clk_out_predivs,
+ .n_predivs = ARRAY_SIZE(clk_out_predivs),
+ },
+ .common = {
+ .reg = 0x1f0,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("out-a",
+ out_parents,
+ &ccu_mp_ops,
+ 0),
+ },
+};
+static struct ccu_mp out_b_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = clk_out_predivs,
+ .n_predivs = ARRAY_SIZE(clk_out_predivs),
+ },
+ .common = {
+ .reg = 0x1f4,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("out-b",
+ out_parents,
+ &ccu_mp_ops,
+ 0),
+ },
+};
+
+static struct ccu_common *sun4i_sun7i_ccu_clks[] = {
+ &hosc_clk.common,
+ &pll_core_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_sun4i_clk.common,
+ &pll_ve_sun7i_clk.common,
+ &pll_ddr_base_clk.common,
+ &pll_ddr_clk.common,
+ &pll_ddr_other_clk.common,
+ &pll_periph_base_clk.common,
+ &pll_periph_sata_clk.common,
+ &pll_video1_clk.common,
+ &pll_gpu_clk.common,
+ &cpu_clk.common,
+ &axi_clk.common,
+ &axi_dram_clk.common,
+ &ahb_sun4i_clk.common,
+ &ahb_sun7i_clk.common,
+ &apb0_clk.common,
+ &apb1_clk.common,
+ &ahb_otg_clk.common,
+ &ahb_ehci0_clk.common,
+ &ahb_ohci0_clk.common,
+ &ahb_ehci1_clk.common,
+ &ahb_ohci1_clk.common,
+ &ahb_ss_clk.common,
+ &ahb_dma_clk.common,
+ &ahb_bist_clk.common,
+ &ahb_mmc0_clk.common,
+ &ahb_mmc1_clk.common,
+ &ahb_mmc2_clk.common,
+ &ahb_mmc3_clk.common,
+ &ahb_ms_clk.common,
+ &ahb_nand_clk.common,
+ &ahb_sdram_clk.common,
+ &ahb_ace_clk.common,
+ &ahb_emac_clk.common,
+ &ahb_ts_clk.common,
+ &ahb_spi0_clk.common,
+ &ahb_spi1_clk.common,
+ &ahb_spi2_clk.common,
+ &ahb_spi3_clk.common,
+ &ahb_pata_clk.common,
+ &ahb_sata_clk.common,
+ &ahb_gps_clk.common,
+ &ahb_hstimer_clk.common,
+ &ahb_ve_clk.common,
+ &ahb_tvd_clk.common,
+ &ahb_tve0_clk.common,
+ &ahb_tve1_clk.common,
+ &ahb_lcd0_clk.common,
+ &ahb_lcd1_clk.common,
+ &ahb_csi0_clk.common,
+ &ahb_csi1_clk.common,
+ &ahb_hdmi1_clk.common,
+ &ahb_hdmi0_clk.common,
+ &ahb_de_be0_clk.common,
+ &ahb_de_be1_clk.common,
+ &ahb_de_fe0_clk.common,
+ &ahb_de_fe1_clk.common,
+ &ahb_gmac_clk.common,
+ &ahb_mp_clk.common,
+ &ahb_gpu_clk.common,
+ &apb0_codec_clk.common,
+ &apb0_spdif_clk.common,
+ &apb0_ac97_clk.common,
+ &apb0_i2s0_clk.common,
+ &apb0_i2s1_clk.common,
+ &apb0_pio_clk.common,
+ &apb0_ir0_clk.common,
+ &apb0_ir1_clk.common,
+ &apb0_i2s2_clk.common,
+ &apb0_keypad_clk.common,
+ &apb1_i2c0_clk.common,
+ &apb1_i2c1_clk.common,
+ &apb1_i2c2_clk.common,
+ &apb1_i2c3_clk.common,
+ &apb1_can_clk.common,
+ &apb1_scr_clk.common,
+ &apb1_ps20_clk.common,
+ &apb1_ps21_clk.common,
+ &apb1_i2c4_clk.common,
+ &apb1_uart0_clk.common,
+ &apb1_uart1_clk.common,
+ &apb1_uart2_clk.common,
+ &apb1_uart3_clk.common,
+ &apb1_uart4_clk.common,
+ &apb1_uart5_clk.common,
+ &apb1_uart6_clk.common,
+ &apb1_uart7_clk.common,
+ &nand_clk.common,
+ &ms_clk.common,
+ &mmc0_clk.common,
+ &mmc0_output_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc1_clk.common,
+ &mmc1_output_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc2_clk.common,
+ &mmc2_output_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc3_clk.common,
+ &mmc3_output_clk.common,
+ &mmc3_sample_clk.common,
+ &ts_clk.common,
+ &ss_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &pata_clk.common,
+ &ir0_sun4i_clk.common,
+ &ir1_sun4i_clk.common,
+ &ir0_sun7i_clk.common,
+ &ir1_sun7i_clk.common,
+ &i2s0_clk.common,
+ &ac97_clk.common,
+ &spdif_clk.common,
+ &keypad_clk.common,
+ &sata_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_phy_clk.common,
+ &spi3_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi0_clk.common,
+ &dram_csi1_clk.common,
+ &dram_ts_clk.common,
+ &dram_tvd_clk.common,
+ &dram_tve0_clk.common,
+ &dram_tve1_clk.common,
+ &dram_out_clk.common,
+ &dram_de_fe1_clk.common,
+ &dram_de_fe0_clk.common,
+ &dram_de_be0_clk.common,
+ &dram_de_be1_clk.common,
+ &dram_mp_clk.common,
+ &dram_ace_clk.common,
+ &de_be0_clk.common,
+ &de_be1_clk.common,
+ &de_fe0_clk.common,
+ &de_fe1_clk.common,
+ &de_mp_clk.common,
+ &tcon0_ch0_clk.common,
+ &tcon1_ch0_clk.common,
+ &csi_sclk_clk.common,
+ &tvd_sun4i_clk.common,
+ &tvd_sclk1_sun7i_clk.common,
+ &tvd_sclk2_sun7i_clk.common,
+ &tcon0_ch1_sclk2_clk.common,
+ &tcon0_ch1_clk.common,
+ &tcon1_ch1_sclk2_clk.common,
+ &tcon1_ch1_clk.common,
+ &csi0_clk.common,
+ &csi1_clk.common,
+ &ve_clk.common,
+ &codec_clk.common,
+ &avs_clk.common,
+ &ace_clk.common,
+ &hdmi_clk.common,
+ &gpu_sun4i_clk.common,
+ &gpu_sun7i_clk.common,
+ &mbus_sun4i_clk.common,
+ &mbus_sun7i_clk.common,
+ &hdmi1_slow_clk.common,
+ &hdmi1_clk.common,
+ &out_a_clk.common,
+ &out_b_clk.common
+};
+
+/* Post-divider for pll-audio is hardcoded to 4 */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x",
+ "pll-video0", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x",
+ "pll-video1", 1, 2, CLK_SET_RATE_PARENT);
+
+static struct clk_hw_onecell_data sun4i_a10_hw_clks = {
+ .hws = {
+ [CLK_HOSC] = &hosc_clk.common.hw,
+ [CLK_PLL_CORE] = &pll_core_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_sun4i_clk.common.hw,
+ [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw,
+ [CLK_PLL_PERIPH_BASE] = &pll_periph_base_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.hw,
+ [CLK_PLL_PERIPH_SATA] = &pll_periph_sata_clk.common.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AXI_DRAM] = &axi_dram_clk.common.hw,
+ [CLK_AHB] = &ahb_sun4i_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_AHB_OTG] = &ahb_otg_clk.common.hw,
+ [CLK_AHB_EHCI0] = &ahb_ehci0_clk.common.hw,
+ [CLK_AHB_OHCI0] = &ahb_ohci0_clk.common.hw,
+ [CLK_AHB_EHCI1] = &ahb_ehci1_clk.common.hw,
+ [CLK_AHB_OHCI1] = &ahb_ohci1_clk.common.hw,
+ [CLK_AHB_SS] = &ahb_ss_clk.common.hw,
+ [CLK_AHB_DMA] = &ahb_dma_clk.common.hw,
+ [CLK_AHB_BIST] = &ahb_bist_clk.common.hw,
+ [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw,
+ [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw,
+ [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw,
+ [CLK_AHB_MMC3] = &ahb_mmc3_clk.common.hw,
+ [CLK_AHB_MS] = &ahb_ms_clk.common.hw,
+ [CLK_AHB_NAND] = &ahb_nand_clk.common.hw,
+ [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw,
+ [CLK_AHB_ACE] = &ahb_ace_clk.common.hw,
+ [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw,
+ [CLK_AHB_TS] = &ahb_ts_clk.common.hw,
+ [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw,
+ [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw,
+ [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw,
+ [CLK_AHB_SPI3] = &ahb_spi3_clk.common.hw,
+ [CLK_AHB_PATA] = &ahb_pata_clk.common.hw,
+ [CLK_AHB_SATA] = &ahb_sata_clk.common.hw,
+ [CLK_AHB_GPS] = &ahb_gps_clk.common.hw,
+ [CLK_AHB_VE] = &ahb_ve_clk.common.hw,
+ [CLK_AHB_TVD] = &ahb_tvd_clk.common.hw,
+ [CLK_AHB_TVE0] = &ahb_tve0_clk.common.hw,
+ [CLK_AHB_TVE1] = &ahb_tve1_clk.common.hw,
+ [CLK_AHB_LCD0] = &ahb_lcd0_clk.common.hw,
+ [CLK_AHB_LCD1] = &ahb_lcd1_clk.common.hw,
+ [CLK_AHB_CSI0] = &ahb_csi0_clk.common.hw,
+ [CLK_AHB_CSI1] = &ahb_csi1_clk.common.hw,
+ [CLK_AHB_HDMI0] = &ahb_hdmi0_clk.common.hw,
+ [CLK_AHB_DE_BE0] = &ahb_de_be0_clk.common.hw,
+ [CLK_AHB_DE_BE1] = &ahb_de_be1_clk.common.hw,
+ [CLK_AHB_DE_FE0] = &ahb_de_fe0_clk.common.hw,
+ [CLK_AHB_DE_FE1] = &ahb_de_fe1_clk.common.hw,
+ [CLK_AHB_MP] = &ahb_mp_clk.common.hw,
+ [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw,
+ [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw,
+ [CLK_APB0_SPDIF] = &apb0_spdif_clk.common.hw,
+ [CLK_APB0_AC97] = &apb0_ac97_clk.common.hw,
+ [CLK_APB0_I2S0] = &apb0_i2s0_clk.common.hw,
+ [CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
+ [CLK_APB0_IR0] = &apb0_ir0_clk.common.hw,
+ [CLK_APB0_IR1] = &apb0_ir1_clk.common.hw,
+ [CLK_APB0_KEYPAD] = &apb0_keypad_clk.common.hw,
+ [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw,
+ [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw,
+ [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw,
+ [CLK_APB1_CAN] = &apb1_can_clk.common.hw,
+ [CLK_APB1_SCR] = &apb1_scr_clk.common.hw,
+ [CLK_APB1_PS20] = &apb1_ps20_clk.common.hw,
+ [CLK_APB1_PS21] = &apb1_ps21_clk.common.hw,
+ [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw,
+ [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw,
+ [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw,
+ [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw,
+ [CLK_APB1_UART4] = &apb1_uart4_clk.common.hw,
+ [CLK_APB1_UART5] = &apb1_uart5_clk.common.hw,
+ [CLK_APB1_UART6] = &apb1_uart6_clk.common.hw,
+ [CLK_APB1_UART7] = &apb1_uart7_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MS] = &ms_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC3] = &mmc3_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_SS] = &ss_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_PATA] = &pata_clk.common.hw,
+ [CLK_IR0] = &ir0_sun4i_clk.common.hw,
+ [CLK_IR1] = &ir1_sun4i_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_AC97] = &ac97_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_KEYPAD] = &keypad_clk.common.hw,
+ [CLK_SATA] = &sata_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_PHY] = &usb_phy_clk.common.hw,
+ /* CLK_GPS is unimplemented */
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw,
+ [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw,
+ [CLK_DRAM_TVE0] = &dram_tve0_clk.common.hw,
+ [CLK_DRAM_TVE1] = &dram_tve1_clk.common.hw,
+ [CLK_DRAM_OUT] = &dram_out_clk.common.hw,
+ [CLK_DRAM_DE_FE1] = &dram_de_fe1_clk.common.hw,
+ [CLK_DRAM_DE_FE0] = &dram_de_fe0_clk.common.hw,
+ [CLK_DRAM_DE_BE0] = &dram_de_be0_clk.common.hw,
+ [CLK_DRAM_DE_BE1] = &dram_de_be1_clk.common.hw,
+ [CLK_DRAM_MP] = &dram_mp_clk.common.hw,
+ [CLK_DRAM_ACE] = &dram_ace_clk.common.hw,
+ [CLK_DE_BE0] = &de_be0_clk.common.hw,
+ [CLK_DE_BE1] = &de_be1_clk.common.hw,
+ [CLK_DE_FE0] = &de_fe0_clk.common.hw,
+ [CLK_DE_FE1] = &de_fe1_clk.common.hw,
+ [CLK_DE_MP] = &de_mp_clk.common.hw,
+ [CLK_TCON0_CH0] = &tcon0_ch0_clk.common.hw,
+ [CLK_TCON1_CH0] = &tcon1_ch0_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_TVD] = &tvd_sun4i_clk.common.hw,
+ [CLK_TCON0_CH1_SCLK2] = &tcon0_ch1_sclk2_clk.common.hw,
+ [CLK_TCON0_CH1] = &tcon0_ch1_clk.common.hw,
+ [CLK_TCON1_CH1_SCLK2] = &tcon1_ch1_sclk2_clk.common.hw,
+ [CLK_TCON1_CH1] = &tcon1_ch1_clk.common.hw,
+ [CLK_CSI0] = &csi0_clk.common.hw,
+ [CLK_CSI1] = &csi1_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_CODEC] = &codec_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_ACE] = &ace_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_GPU] = &gpu_sun7i_clk.common.hw,
+ [CLK_MBUS] = &mbus_sun4i_clk.common.hw,
+ },
+ .num = CLK_NUMBER_SUN4I,
+};
+
+static struct clk_hw_onecell_data sun7i_a20_hw_clks = {
+ .hws = {
+ [CLK_HOSC] = &hosc_clk.common.hw,
+ [CLK_PLL_CORE] = &pll_core_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_sun7i_clk.common.hw,
+ [CLK_PLL_DDR_BASE] = &pll_ddr_base_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_DDR_OTHER] = &pll_ddr_other_clk.common.hw,
+ [CLK_PLL_PERIPH_BASE] = &pll_periph_base_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.hw,
+ [CLK_PLL_PERIPH_SATA] = &pll_periph_sata_clk.common.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB] = &ahb_sun7i_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_AHB_OTG] = &ahb_otg_clk.common.hw,
+ [CLK_AHB_EHCI0] = &ahb_ehci0_clk.common.hw,
+ [CLK_AHB_OHCI0] = &ahb_ohci0_clk.common.hw,
+ [CLK_AHB_EHCI1] = &ahb_ehci1_clk.common.hw,
+ [CLK_AHB_OHCI1] = &ahb_ohci1_clk.common.hw,
+ [CLK_AHB_SS] = &ahb_ss_clk.common.hw,
+ [CLK_AHB_DMA] = &ahb_dma_clk.common.hw,
+ [CLK_AHB_BIST] = &ahb_bist_clk.common.hw,
+ [CLK_AHB_MMC0] = &ahb_mmc0_clk.common.hw,
+ [CLK_AHB_MMC1] = &ahb_mmc1_clk.common.hw,
+ [CLK_AHB_MMC2] = &ahb_mmc2_clk.common.hw,
+ [CLK_AHB_MMC3] = &ahb_mmc3_clk.common.hw,
+ [CLK_AHB_MS] = &ahb_ms_clk.common.hw,
+ [CLK_AHB_NAND] = &ahb_nand_clk.common.hw,
+ [CLK_AHB_SDRAM] = &ahb_sdram_clk.common.hw,
+ [CLK_AHB_ACE] = &ahb_ace_clk.common.hw,
+ [CLK_AHB_EMAC] = &ahb_emac_clk.common.hw,
+ [CLK_AHB_TS] = &ahb_ts_clk.common.hw,
+ [CLK_AHB_SPI0] = &ahb_spi0_clk.common.hw,
+ [CLK_AHB_SPI1] = &ahb_spi1_clk.common.hw,
+ [CLK_AHB_SPI2] = &ahb_spi2_clk.common.hw,
+ [CLK_AHB_SPI3] = &ahb_spi3_clk.common.hw,
+ [CLK_AHB_PATA] = &ahb_pata_clk.common.hw,
+ [CLK_AHB_SATA] = &ahb_sata_clk.common.hw,
+ [CLK_AHB_HSTIMER] = &ahb_hstimer_clk.common.hw,
+ [CLK_AHB_VE] = &ahb_ve_clk.common.hw,
+ [CLK_AHB_TVD] = &ahb_tvd_clk.common.hw,
+ [CLK_AHB_TVE0] = &ahb_tve0_clk.common.hw,
+ [CLK_AHB_TVE1] = &ahb_tve1_clk.common.hw,
+ [CLK_AHB_LCD0] = &ahb_lcd0_clk.common.hw,
+ [CLK_AHB_LCD1] = &ahb_lcd1_clk.common.hw,
+ [CLK_AHB_CSI0] = &ahb_csi0_clk.common.hw,
+ [CLK_AHB_CSI1] = &ahb_csi1_clk.common.hw,
+ [CLK_AHB_HDMI1] = &ahb_hdmi1_clk.common.hw,
+ [CLK_AHB_HDMI0] = &ahb_hdmi0_clk.common.hw,
+ [CLK_AHB_DE_BE0] = &ahb_de_be0_clk.common.hw,
+ [CLK_AHB_DE_BE1] = &ahb_de_be1_clk.common.hw,
+ [CLK_AHB_DE_FE0] = &ahb_de_fe0_clk.common.hw,
+ [CLK_AHB_DE_FE1] = &ahb_de_fe1_clk.common.hw,
+ [CLK_AHB_GMAC] = &ahb_gmac_clk.common.hw,
+ [CLK_AHB_MP] = &ahb_mp_clk.common.hw,
+ [CLK_AHB_GPU] = &ahb_gpu_clk.common.hw,
+ [CLK_APB0_CODEC] = &apb0_codec_clk.common.hw,
+ [CLK_APB0_SPDIF] = &apb0_spdif_clk.common.hw,
+ [CLK_APB0_AC97] = &apb0_ac97_clk.common.hw,
+ [CLK_APB0_I2S0] = &apb0_i2s0_clk.common.hw,
+ [CLK_APB0_I2S1] = &apb0_i2s1_clk.common.hw,
+ [CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
+ [CLK_APB0_IR0] = &apb0_ir0_clk.common.hw,
+ [CLK_APB0_IR1] = &apb0_ir1_clk.common.hw,
+ [CLK_APB0_I2S2] = &apb0_i2s2_clk.common.hw,
+ [CLK_APB0_KEYPAD] = &apb0_keypad_clk.common.hw,
+ [CLK_APB1_I2C0] = &apb1_i2c0_clk.common.hw,
+ [CLK_APB1_I2C1] = &apb1_i2c1_clk.common.hw,
+ [CLK_APB1_I2C2] = &apb1_i2c2_clk.common.hw,
+ [CLK_APB1_I2C3] = &apb1_i2c3_clk.common.hw,
+ [CLK_APB1_CAN] = &apb1_can_clk.common.hw,
+ [CLK_APB1_SCR] = &apb1_scr_clk.common.hw,
+ [CLK_APB1_PS20] = &apb1_ps20_clk.common.hw,
+ [CLK_APB1_PS21] = &apb1_ps21_clk.common.hw,
+ [CLK_APB1_I2C4] = &apb1_i2c4_clk.common.hw,
+ [CLK_APB1_UART0] = &apb1_uart0_clk.common.hw,
+ [CLK_APB1_UART1] = &apb1_uart1_clk.common.hw,
+ [CLK_APB1_UART2] = &apb1_uart2_clk.common.hw,
+ [CLK_APB1_UART3] = &apb1_uart3_clk.common.hw,
+ [CLK_APB1_UART4] = &apb1_uart4_clk.common.hw,
+ [CLK_APB1_UART5] = &apb1_uart5_clk.common.hw,
+ [CLK_APB1_UART6] = &apb1_uart6_clk.common.hw,
+ [CLK_APB1_UART7] = &apb1_uart7_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MS] = &ms_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC3] = &mmc3_clk.common.hw,
+ [CLK_MMC3_OUTPUT] = &mmc3_output_clk.common.hw,
+ [CLK_MMC3_SAMPLE] = &mmc3_sample_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_SS] = &ss_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_PATA] = &pata_clk.common.hw,
+ [CLK_IR0] = &ir0_sun7i_clk.common.hw,
+ [CLK_IR1] = &ir1_sun7i_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_AC97] = &ac97_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_KEYPAD] = &keypad_clk.common.hw,
+ [CLK_SATA] = &sata_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_PHY] = &usb_phy_clk.common.hw,
+ /* CLK_GPS is unimplemented */
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw,
+ [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw,
+ [CLK_DRAM_TVE0] = &dram_tve0_clk.common.hw,
+ [CLK_DRAM_TVE1] = &dram_tve1_clk.common.hw,
+ [CLK_DRAM_OUT] = &dram_out_clk.common.hw,
+ [CLK_DRAM_DE_FE1] = &dram_de_fe1_clk.common.hw,
+ [CLK_DRAM_DE_FE0] = &dram_de_fe0_clk.common.hw,
+ [CLK_DRAM_DE_BE0] = &dram_de_be0_clk.common.hw,
+ [CLK_DRAM_DE_BE1] = &dram_de_be1_clk.common.hw,
+ [CLK_DRAM_MP] = &dram_mp_clk.common.hw,
+ [CLK_DRAM_ACE] = &dram_ace_clk.common.hw,
+ [CLK_DE_BE0] = &de_be0_clk.common.hw,
+ [CLK_DE_BE1] = &de_be1_clk.common.hw,
+ [CLK_DE_FE0] = &de_fe0_clk.common.hw,
+ [CLK_DE_FE1] = &de_fe1_clk.common.hw,
+ [CLK_DE_MP] = &de_mp_clk.common.hw,
+ [CLK_TCON0_CH0] = &tcon0_ch0_clk.common.hw,
+ [CLK_TCON1_CH0] = &tcon1_ch0_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_TVD_SCLK2] = &tvd_sclk2_sun7i_clk.common.hw,
+ [CLK_TVD] = &tvd_sclk1_sun7i_clk.common.hw,
+ [CLK_TCON0_CH1_SCLK2] = &tcon0_ch1_sclk2_clk.common.hw,
+ [CLK_TCON0_CH1] = &tcon0_ch1_clk.common.hw,
+ [CLK_TCON1_CH1_SCLK2] = &tcon1_ch1_sclk2_clk.common.hw,
+ [CLK_TCON1_CH1] = &tcon1_ch1_clk.common.hw,
+ [CLK_CSI0] = &csi0_clk.common.hw,
+ [CLK_CSI1] = &csi1_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_CODEC] = &codec_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_ACE] = &ace_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_GPU] = &gpu_sun7i_clk.common.hw,
+ [CLK_MBUS] = &mbus_sun7i_clk.common.hw,
+ [CLK_HDMI1_SLOW] = &hdmi1_slow_clk.common.hw,
+ [CLK_HDMI1] = &hdmi1_clk.common.hw,
+ [CLK_OUT_A] = &out_a_clk.common.hw,
+ [CLK_OUT_B] = &out_b_clk.common.hw,
+ },
+ .num = CLK_NUMBER_SUN7I,
+};
+
+static struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_PHY2] = { 0x0cc, BIT(2) },
+ [RST_GPS] = { 0x0d0, BIT(0) },
+ [RST_DE_BE0] = { 0x104, BIT(30) },
+ [RST_DE_BE1] = { 0x108, BIT(30) },
+ [RST_DE_FE0] = { 0x10c, BIT(30) },
+ [RST_DE_FE1] = { 0x110, BIT(30) },
+ [RST_DE_MP] = { 0x114, BIT(30) },
+ [RST_TVE0] = { 0x118, BIT(29) },
+ [RST_TCON0] = { 0x118, BIT(30) },
+ [RST_TVE1] = { 0x11c, BIT(29) },
+ [RST_TCON1] = { 0x11c, BIT(30) },
+ [RST_CSI0] = { 0x134, BIT(30) },
+ [RST_CSI1] = { 0x138, BIT(30) },
+ [RST_VE] = { 0x13c, BIT(0) },
+ [RST_ACE] = { 0x148, BIT(16) },
+ [RST_LVDS] = { 0x14c, BIT(0) },
+ [RST_GPU] = { 0x154, BIT(30) },
+ [RST_HDMI_H] = { 0x170, BIT(0) },
+ [RST_HDMI_SYS] = { 0x170, BIT(1) },
+ [RST_HDMI_AUDIO_DMA] = { 0x170, BIT(2) },
+};
+
+static const struct sunxi_ccu_desc sun4i_a10_ccu_desc = {
+ .ccu_clks = sun4i_sun7i_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun4i_sun7i_ccu_clks),
+
+ .hw_clks = &sun4i_a10_hw_clks,
+
+ .resets = sunxi_a10_a20_ccu_resets,
+ .num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets),
+};
+
+static const struct sunxi_ccu_desc sun7i_a20_ccu_desc = {
+ .ccu_clks = sun4i_sun7i_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun4i_sun7i_ccu_clks),
+
+ .hw_clks = &sun7i_a20_hw_clks,
+
+ .resets = sunxi_a10_a20_ccu_resets,
+ .num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets),
+};
+
+static void __init sun4i_ccu_init(struct device_node *node,
+ const struct sunxi_ccu_desc *desc)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN4I_PLL_AUDIO_REG);
+ val &= ~GENMASK(29, 26);
+ writel(val | (4 << 26), reg + SUN4I_PLL_AUDIO_REG);
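+ /* Bits [29:26] hold the post-divider; the value 4 matches the /4 fixed factor */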
+
+ /*
+ * Use the peripheral PLL6 as the AHB parent instead of CPU/AXI,
+ * which changes rate with cpufreq.
+ *
+ * This matters especially for the HS timer, whose parent clock
+ * is AHB.
+ *
+ * N.B.: these bits are undocumented in the A10 manual.
+ */
+ val = readl(reg + SUN4I_AHB_REG);
+ val &= ~GENMASK(7, 6);
+ writel(val | (2 << 6), reg + SUN4I_AHB_REG);
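+ /* Mux value 2 selects pll-periph (parent index 2, no prediv) */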
+
+ sunxi_ccu_probe(node, reg, desc);
+}
+
+static void __init sun4i_a10_ccu_setup(struct device_node *node)
+{
+ sun4i_ccu_init(node, &sun4i_a10_ccu_desc);
+}
+CLK_OF_DECLARE(sun4i_a10_ccu, "allwinner,sun4i-a10-ccu",
+ sun4i_a10_ccu_setup);
+
+static void __init sun7i_a20_ccu_setup(struct device_node *node)
+{
+ sun4i_ccu_init(node, &sun7i_a20_ccu_desc);
+}
+CLK_OF_DECLARE(sun7i_a20_ccu, "allwinner,sun7i-a20-ccu",
+ sun7i_a20_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
new file mode 100644
index 000000000000..c5947c7c050e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 Priit Laes
+ *
+ * Priit Laes <plaes@plaes.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN4I_A10_H_
+#define _CCU_SUN4I_A10_H_
+
+#include <dt-bindings/clock/sun4i-a10-ccu.h>
+#include <dt-bindings/clock/sun7i-a20-ccu.h>
+#include <dt-bindings/reset/sun4i-a10-ccu.h>
+
+/* The HOSC is exported */
+#define CLK_PLL_CORE 2
+#define CLK_PLL_AUDIO_BASE 3
+#define CLK_PLL_AUDIO 4
+#define CLK_PLL_AUDIO_2X 5
+#define CLK_PLL_AUDIO_4X 6
+#define CLK_PLL_AUDIO_8X 7
+#define CLK_PLL_VIDEO0 8
+#define CLK_PLL_VIDEO0_2X 9
+#define CLK_PLL_VE 10
+#define CLK_PLL_DDR_BASE 11
+#define CLK_PLL_DDR 12
+#define CLK_PLL_DDR_OTHER 13
+#define CLK_PLL_PERIPH_BASE 14
+#define CLK_PLL_PERIPH 15
+#define CLK_PLL_PERIPH_SATA 16
+#define CLK_PLL_VIDEO1 17
+#define CLK_PLL_VIDEO1_2X 18
+#define CLK_PLL_GPU 19
+
+/* The CPU clock is exported */
+#define CLK_AXI 21
+#define CLK_AXI_DRAM 22
+#define CLK_AHB 23
+#define CLK_APB0 24
+#define CLK_APB1 25
+
+/* AHB gates are exported (23..68) */
+/* APB0 gates are exported (69..78) */
+/* APB1 gates are exported (79..95) */
+/* IP module clocks are exported (96..128) */
+/* DRAM gates are exported (129..142) */
+/* Media (display engine clocks & etc) are exported (143..169) */
+
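+/*
+ * The sun7i count is larger: the A20 additionally exposes HDMI1 and
+ * the CLK_OUT_A/CLK_OUT_B clocks past CLK_MBUS.
+ */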
+#define CLK_NUMBER_SUN4I (CLK_MBUS + 1)
+#define CLK_NUMBER_SUN7I (CLK_OUT_B + 1)
+
+#endif /* _CCU_SUN4I_A10_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 31d7ffda9aab..ab9e850b3707 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -976,8 +976,7 @@ static void __init sun5i_ccu_init(struct device_node *node,
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4d6078fca9ac..8af434815fba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1217,8 +1217,7 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 8a753ed0426d..d93b452f0df9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -716,8 +716,7 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 10b38dc46f75..13eb5b23c5e7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -777,8 +777,7 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 947f9f6e05d2..e43acebdfbcd 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -418,14 +418,8 @@ static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1-sample", "mmc1",
static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1-output", "mmc1",
0x08c, 8, 3, 0);
-/* TODO Support MMC2 clock's new timing mode. */
-static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
- 0x090,
- 0, 4, /* M */
- 16, 2, /* P */
- 24, 2, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_MMC_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
+ 0x090, 0);
static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2-sample", "mmc2",
0x090, 20, 3, 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 62e4f0d2b2fc..1729ff6a5aae 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -135,7 +135,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
static const char * const cpux_parents[] = { "osc32k", "osc24M",
"pll-cpux" , "pll-cpux" };
static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
- 0x050, 16, 2, CLK_IS_CRITICAL);
+ 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
@@ -1103,6 +1103,13 @@ static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_ccu_resets),
};
+static struct ccu_pll_nb sun8i_h3_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ /* copied from pll_cpux_clk */
+ .enable = BIT(31),
+ .lock = BIT(28),
+};
+
static struct ccu_mux_nb sun8i_h3_cpu_nb = {
.common = &cpux_clk.common,
.cm = &cpux_clk.mux,
@@ -1118,8 +1125,7 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
@@ -1130,6 +1136,10 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
sunxi_ccu_probe(node, reg, desc);
+ /* Gate then ungate PLL CPU after any rate changes */
+ ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb);
+
+ /* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun8i_h3_cpu_nb);
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index e54816ec1dbe..71feb7b24e8a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -290,8 +290,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.h b/drivers/clk/sunxi-ng/ccu-sun8i-r.h
index a7a407f12b56..fb01bffb929d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.h
@@ -13,7 +13,7 @@
*/
#ifndef _CCU_SUN8I_R_H
-#define _CCU_SUN8I_R_H_
+#define _CCU_SUN8I_R_H
#include <dt-bindings/clock/sun8i-r-ccu.h>
#include <dt-bindings/reset/sun8i-r-ccu.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
new file mode 100644
index 000000000000..933f2e68f42a
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -0,0 +1,1290 @@
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun8i-r40.h"
+
+/* TODO: The result of N*K is required to be in the [10, 88] range. */
+static struct ccu_nkmp pll_cpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpu",
+ "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUN8I_R40_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: The result of N/M is required to be in the [8, 25] range. */
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
+ "osc24M", 0x0010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: The result of N/M is required to be in the [8, 25] range. */
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x0018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: The result of N*K is required to be in the [10, 77] range. */
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: The result of N*K is required to be in the [21, 58] range. */
+static struct ccu_nk pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nk_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_div pll_periph0_sata_clk = {
+ .enable = BIT(24),
+ .div = _SUNXI_CCU_DIV(0, 2),
+ /*
+ * The formula of pll-periph0 (1x) is 24MHz*N*K/2, and the formula
+ * of pll-periph0-sata is 24MHz*N*K/M/6, so the postdiv here is
+ * 6/2 = 3.
+ */
+ .fixed_post_div = 3,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0-sata",
+ "pll-periph0",
+ &ccu_div_ops, 0),
+ },
+};
+
+/* TODO: The result of N*K is required to be in the [21, 58] range. */
+static struct ccu_nk pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x02c,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nk_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/* TODO: The result of N/M is required to be in the [8, 25] range. */
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static struct ccu_nkm pll_sata_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ .fixed_post_div = 6,
+ .common = {
+ .reg = 0x034,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-sata", "osc24M",
+ &ccu_nkm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const pll_sata_out_parents[] = { "pll-sata",
+ "pll-periph0-sata" };
+static SUNXI_CCU_MUX_WITH_GATE(pll_sata_out_clk, "pll-sata-out",
+ pll_sata_out_parents, 0x034,
+ 30, 1, /* mux */
+ BIT(14), /* gate */
+ CLK_SET_RATE_PARENT);
+
+/* TODO: The result of N/M is required to be in the [8, 25] range. */
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/*
+ * The MIPI PLL has 2 modes: "MIPI" and "HDMI".
+ *
+ * The MIPI mode is a standard NKM-style clock. The HDMI mode is an
+ * integer / fractional clock with switchable multipliers and dividers.
+ * This is not supported here. We hardcode the PLL to MIPI mode.
+ *
+ * TODO: In MIPI mode, M/N is required to be less than or equal to 3,
+ * a constraint which cannot be enforced yet.
+ */
+#define SUN8I_R40_PLL_MIPI_REG 0x040
+
+static const char * const pll_mipi_parents[] = { "pll-video0" };
+static struct ccu_nkm pll_mipi_clk = {
+ .enable = BIT(31) | BIT(23) | BIT(22),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT(8, 4),
+ .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
+ .m = _SUNXI_CCU_DIV(0, 4),
+ .mux = _SUNXI_CCU_MUX(21, 1),
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT_PARENTS("pll-mipi",
+ pll_mipi_parents,
+ &ccu_nkm_ops,
+ CLK_SET_RATE_UNGATE)
+ },
+};
+
+/* TODO: The result of N/M is required to be in the [8, 25] range. */
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+/* TODO: The N factor is required to be in the [16, 75] range. */
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
+ "osc24M", 0x04c,
+ 8, 7, /* N */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static const char * const cpu_parents[] = { "osc32k", "osc24M",
+ "pll-cpu", "pll-cpu" };
+static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents,
+ 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+ "axi", "pll-periph0" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+ "pll-periph0-2x",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_mmc3_clk, "bus-mmc3", "ahb1",
+ 0x060, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb1",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spi2_clk, "bus-spi2", "ahb1",
+ 0x060, BIT(22), 0);
+static SUNXI_CCU_GATE(bus_spi3_clk, "bus-spi3", "ahb1",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(bus_sata_clk, "bus-sata", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb1",
+ 0x060, BIT(27), 0);
+static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb1",
+ 0x060, BIT(28), 0);
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1",
+ 0x060, BIT(29), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb1",
+ 0x060, BIT(30), 0);
+static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb1",
+ 0x060, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mp_clk, "bus-mp", "ahb1",
+ 0x064, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi0_clk, "bus-csi0", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_csi1_clk, "bus-csi1", "ahb1",
+ 0x064, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_hdmi0_clk, "bus-hdmi0", "ahb1",
+ 0x064, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_hdmi1_clk, "bus-hdmi1", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_tve0_clk, "bus-tve0", "ahb1",
+ 0x064, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_tve1_clk, "bus-tve1", "ahb1",
+ 0x064, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_tve_top_clk, "bus-tve-top", "ahb1",
+ 0x064, BIT(15), 0);
+static SUNXI_CCU_GATE(bus_gmac_clk, "bus-gmac", "ahb1",
+ 0x064, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_tvd0_clk, "bus-tvd0", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_tvd1_clk, "bus-tvd1", "ahb1",
+ 0x064, BIT(22), 0);
+static SUNXI_CCU_GATE(bus_tvd2_clk, "bus-tvd2", "ahb1",
+ 0x064, BIT(23), 0);
+static SUNXI_CCU_GATE(bus_tvd3_clk, "bus-tvd3", "ahb1",
+ 0x064, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_tvd_top_clk, "bus-tvd-top", "ahb1",
+ 0x064, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb1",
+ 0x064, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_tcon_lcd1_clk, "bus-tcon-lcd1", "ahb1",
+ 0x064, BIT(27), 0);
+static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb1",
+ 0x064, BIT(28), 0);
+static SUNXI_CCU_GATE(bus_tcon_tv1_clk, "bus-tcon-tv1", "ahb1",
+ 0x064, BIT(29), 0);
+static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb1",
+ 0x064, BIT(30), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ac97_clk, "bus-ac97", "apb1",
+ 0x068, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ir0_clk, "bus-ir0", "apb1",
+ 0x068, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_ir1_clk, "bus-ir1", "apb1",
+ 0x068, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1",
+ 0x068, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_keypad_clk, "bus-keypad", "apb1",
+ 0x068, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
+ 0x068, BIT(14), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2",
+ 0x06c, BIT(3), 0);
+/*
+ * The datasheet marks this bit as "Reserved"; however, the gate
+ * exists in the BSP source code.
+ */
+static SUNXI_CCU_GATE(bus_can_clk, "bus-can", "apb2",
+ 0x06c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2",
+ 0x06c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ps20_clk, "bus-ps20", "apb2",
+ 0x06c, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_ps21_clk, "bus-ps21", "apb2",
+ 0x06c, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_i2c4_clk, "bus-i2c4", "apb2",
+ 0x06c, BIT(15), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_uart5_clk, "bus-uart5", "apb2",
+ 0x06c, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_uart6_clk, "bus-uart6", "apb2",
+ 0x06c, BIT(22), 0);
+static SUNXI_CCU_GATE(bus_uart7_clk, "bus-uart7", "apb2",
+ 0x06c, BIT(23), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1",
+ 0x070, BIT(7), 0);
+
+static const char * const ths_parents[] = { "osc24M" };
+static struct ccu_div ths_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x074,
+ .hw.init = CLK_HW_INIT_PARENTS("ths",
+ ths_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc3_clk, "mmc3", mod0_default_parents, 0x094,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0", };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", mod0_default_parents, 0x0a8,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi3_clk, "spi3", mod0_default_parents, 0x0ac,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
+ 0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(ac97_clk, "ac97", i2s_parents,
+ 0x0bc, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_parents,
+ 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char * const keypad_parents[] = { "osc24M", "osc32k" };
+static const u8 keypad_table[] = { 0, 2 };
+static struct ccu_mp keypad_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(0, 5),
+ .p = _SUNXI_CCU_DIV(16, 2),
+ .mux = _SUNXI_CCU_MUX_TABLE(24, 2, keypad_table),
+ .common = {
+ .reg = 0x0c4,
+ .hw.init = CLK_HW_INIT_PARENTS("keypad",
+ keypad_parents,
+ &ccu_mp_ops,
+ 0),
+ }
+};
+
+static const char * const sata_parents[] = { "pll-sata-out", "sata-ext" };
+static SUNXI_CCU_MUX_WITH_GATE(sata_clk, "sata", sata_parents,
+ 0x0c8, 24, 1, BIT(31), CLK_SET_RATE_PARENT);
+
+/*
+ * There are 3 OHCI 12M clock source selection bits in this register.
+ * We will force them to 0 (12M divided from 48M).
+ */
+#define SUN8I_R40_USB_CLK_REG 0x0cc
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M",
+ 0x0cc, BIT(16), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc12M",
+ 0x0cc, BIT(17), 0);
+static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc12M",
+ 0x0cc, BIT(18), 0);
+
+static const char * const ir_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1", "osc32k" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir0_clk, "ir0", ir_parents, 0x0d0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir1_clk, "ir1", ir_parents, 0x0d4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+ 0x0f4, 0, 2, 20, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi0_clk, "dram-csi0", "dram",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_csi1_clk, "dram-csi1", "dram",
+ 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
+ 0x100, BIT(3), 0);
+static SUNXI_CCU_GATE(dram_tvd_clk, "dram-tvd", "dram",
+ 0x100, BIT(4), 0);
+static SUNXI_CCU_GATE(dram_mp_clk, "dram-mp", "dram",
+ 0x100, BIT(5), 0);
+static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
+ 0x100, BIT(6), 0);
+
+static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", de_parents,
+ 0x108, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tcon_parents[] = { "pll-video0", "pll-video1",
+ "pll-video0-2x", "pll-video1-2x",
+ "pll-mipi" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
+ 0x110, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
+ 0x114, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_parents,
+ 0x118, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_parents,
+ 0x11c, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const deinterlace_parents[] = { "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
+ deinterlace_parents, 0x124, 0, 4, 24, 3,
+ BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1",
+ "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents,
+ 0x130, 0, 5, 8, 3, BIT(15), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
+ 0x150, 0, 4, 24, 2, BIT(31), 0);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M",
+ 0x154, BIT(31), 0);
+
+/*
+ * The SoC's user manual mentions the P factor but does not use it in
+ * the frequency formula.
+ *
+ * The factor is included here, following the BSP kernel source,
+ * which does use the P factor for this clock.
+ */
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x15c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-video1",
+ "pll-periph0" };
+static SUNXI_CCU_M_WITH_MUX_GATE(dsi_dphy_clk, "dsi-dphy", dsi_dphy_parents,
+ 0x168, 0, 4, 8, 2, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(tve0_clk, "tve0", tcon_parents,
+ 0x180, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(tve1_clk, "tve1", tcon_parents,
+ 0x184, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tvd_parents[] = { "pll-video0", "pll-video1",
+ "pll-video0-2x", "pll-video1-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(tvd0_clk, "tvd0", tvd_parents,
+ 0x188, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(tvd1_clk, "tvd1", tvd_parents,
+ 0x18c, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(tvd2_clk, "tvd2", tvd_parents,
+ 0x190, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_MUX_GATE(tvd3_clk, "tvd3", tvd_parents,
+ 0x194, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char * const out_parents[] = { "osc24M", "osc32k", "osc24M" };
+static const struct ccu_mux_fixed_prediv out_predivs[] = {
+ { .index = 0, .div = 750, },
+};
+
+static struct ccu_mp outa_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = out_predivs,
+ .n_predivs = ARRAY_SIZE(out_predivs),
+ },
+ .common = {
+ .reg = 0x1f0,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents,
+ &ccu_mp_ops, 0),
+ }
+};
+
+static struct ccu_mp outb_clk = {
+ .enable = BIT(31),
+ .m = _SUNXI_CCU_DIV(8, 5),
+ .p = _SUNXI_CCU_DIV(20, 2),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = out_predivs,
+ .n_predivs = ARRAY_SIZE(out_predivs),
+ },
+ .common = {
+ .reg = 0x1f4,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents,
+ &ccu_mp_ops, 0),
+ }
+};
+
+static struct ccu_common *sun8i_r40_ccu_clks[] = {
+ &pll_cpu_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph0_sata_clk.common,
+ &pll_periph1_clk.common,
+ &pll_video1_clk.common,
+ &pll_sata_clk.common,
+ &pll_sata_out_clk.common,
+ &pll_gpu_clk.common,
+ &pll_mipi_clk.common,
+ &pll_de_clk.common,
+ &pll_ddr1_clk.common,
+ &cpu_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_ce_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_mmc3_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_emac_clk.common,
+ &bus_ts_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_spi2_clk.common,
+ &bus_spi3_clk.common,
+ &bus_sata_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ehci2_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ohci2_clk.common,
+ &bus_ve_clk.common,
+ &bus_mp_clk.common,
+ &bus_deinterlace_clk.common,
+ &bus_csi0_clk.common,
+ &bus_csi1_clk.common,
+ &bus_hdmi0_clk.common,
+ &bus_hdmi1_clk.common,
+ &bus_de_clk.common,
+ &bus_tve0_clk.common,
+ &bus_tve1_clk.common,
+ &bus_tve_top_clk.common,
+ &bus_gmac_clk.common,
+ &bus_gpu_clk.common,
+ &bus_tvd0_clk.common,
+ &bus_tvd1_clk.common,
+ &bus_tvd2_clk.common,
+ &bus_tvd3_clk.common,
+ &bus_tvd_top_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &bus_tcon_lcd1_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &bus_tcon_tv1_clk.common,
+ &bus_tcon_top_clk.common,
+ &bus_codec_clk.common,
+ &bus_spdif_clk.common,
+ &bus_ac97_clk.common,
+ &bus_pio_clk.common,
+ &bus_ir0_clk.common,
+ &bus_ir1_clk.common,
+ &bus_ths_clk.common,
+ &bus_keypad_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_can_clk.common,
+ &bus_scr_clk.common,
+ &bus_ps20_clk.common,
+ &bus_ps21_clk.common,
+ &bus_i2c4_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_uart6_clk.common,
+ &bus_uart7_clk.common,
+ &bus_dbg_clk.common,
+ &ths_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &mmc3_clk.common,
+ &ts_clk.common,
+ &ce_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &spi3_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &ac97_clk.common,
+ &spdif_clk.common,
+ &keypad_clk.common,
+ &sata_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_phy2_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_ohci2_clk.common,
+ &ir0_clk.common,
+ &ir1_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi0_clk.common,
+ &dram_csi1_clk.common,
+ &dram_ts_clk.common,
+ &dram_tvd_clk.common,
+ &dram_mp_clk.common,
+ &dram_deinterlace_clk.common,
+ &de_clk.common,
+ &mp_clk.common,
+ &tcon_lcd0_clk.common,
+ &tcon_lcd1_clk.common,
+ &tcon_tv0_clk.common,
+ &tcon_tv1_clk.common,
+ &deinterlace_clk.common,
+ &csi1_mclk_clk.common,
+ &csi_sclk_clk.common,
+ &csi0_mclk_clk.common,
+ &ve_clk.common,
+ &codec_clk.common,
+ &avs_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &mbus_clk.common,
+ &dsi_dphy_clk.common,
+ &tve0_clk.common,
+ &tve1_clk.common,
+ &tvd0_clk.common,
+ &tvd1_clk.common,
+ &tvd2_clk.common,
+ &tvd3_clk.common,
+ &gpu_clk.common,
+ &outa_clk.common,
+ &outb_clk.common,
+};
+
+/* Fixed Factor clocks */
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x",
+ "pll-periph1", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x",
+ "pll-video0", 1, 2, 0);
+static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x",
+ "pll-video1", 1, 2, 0);
+
+static struct clk_hw_onecell_data sun8i_r40_hw_clks = {
+ .hws = {
+ [CLK_OSC_12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPU] = &pll_cpu_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_SATA] = &pll_periph0_sata_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_SATA] = &pll_sata_clk.common.hw,
+ [CLK_PLL_SATA_OUT] = &pll_sata_out_clk.common.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_MMC3] = &bus_mmc3_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw,
+ [CLK_BUS_SPI3] = &bus_spi3_clk.common.hw,
+ [CLK_BUS_SATA] = &bus_sata_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_MP] = &bus_mp_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_BUS_CSI0] = &bus_csi0_clk.common.hw,
+ [CLK_BUS_CSI1] = &bus_csi1_clk.common.hw,
+ [CLK_BUS_HDMI0] = &bus_hdmi0_clk.common.hw,
+ [CLK_BUS_HDMI1] = &bus_hdmi1_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_BUS_TVE0] = &bus_tve0_clk.common.hw,
+ [CLK_BUS_TVE1] = &bus_tve1_clk.common.hw,
+ [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw,
+ [CLK_BUS_GMAC] = &bus_gmac_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_TVD0] = &bus_tvd0_clk.common.hw,
+ [CLK_BUS_TVD1] = &bus_tvd1_clk.common.hw,
+ [CLK_BUS_TVD2] = &bus_tvd2_clk.common.hw,
+ [CLK_BUS_TVD3] = &bus_tvd3_clk.common.hw,
+ [CLK_BUS_TVD_TOP] = &bus_tvd_top_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD1] = &bus_tcon_lcd1_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_AC97] = &bus_ac97_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_IR0] = &bus_ir0_clk.common.hw,
+ [CLK_BUS_IR1] = &bus_ir1_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_BUS_KEYPAD] = &bus_keypad_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_CAN] = &bus_can_clk.common.hw,
+ [CLK_BUS_SCR] = &bus_scr_clk.common.hw,
+ [CLK_BUS_PS20] = &bus_ps20_clk.common.hw,
+ [CLK_BUS_PS21] = &bus_ps21_clk.common.hw,
+ [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_UART6] = &bus_uart6_clk.common.hw,
+ [CLK_BUS_UART7] = &bus_uart7_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_THS] = &ths_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC3] = &mmc3_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_SPI3] = &spi3_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_AC97] = &ac97_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_KEYPAD] = &keypad_clk.common.hw,
+ [CLK_SATA] = &sata_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_PHY2] = &usb_phy2_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
+ [CLK_IR0] = &ir0_clk.common.hw,
+ [CLK_IR1] = &ir1_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI0] = &dram_csi0_clk.common.hw,
+ [CLK_DRAM_CSI1] = &dram_csi1_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw,
+ [CLK_DRAM_MP] = &dram_mp_clk.common.hw,
+ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_MP] = &mp_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_TCON_LCD1] = &tcon_lcd1_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_CODEC] = &codec_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
+ [CLK_TVE0] = &tve0_clk.common.hw,
+ [CLK_TVE1] = &tve1_clk.common.hw,
+ [CLK_TVD0] = &tvd0_clk.common.hw,
+ [CLK_TVD1] = &tvd1_clk.common.hw,
+ [CLK_TVD2] = &tvd2_clk.common.hw,
+ [CLK_TVD3] = &tvd3_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_OUTA] = &outa_clk.common.hw,
+ [CLK_OUTB] = &outb_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_r40_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_PHY2] = { 0x0cc, BIT(2) },
+
+ [RST_DRAM] = { 0x0f4, BIT(31) },
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_CE] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_MMC3] = { 0x2c0, BIT(11) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
+ [RST_BUS_TS] = { 0x2c0, BIT(18) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_SPI2] = { 0x2c0, BIT(22) },
+ [RST_BUS_SPI3] = { 0x2c0, BIT(23) },
+ [RST_BUS_SATA] = { 0x2c0, BIT(24) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(25) },
+ [RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
+ [RST_BUS_EHCI1] = { 0x2c0, BIT(27) },
+ [RST_BUS_EHCI2] = { 0x2c0, BIT(28) },
+ [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
+ [RST_BUS_OHCI1] = { 0x2c0, BIT(30) },
+ [RST_BUS_OHCI2] = { 0x2c0, BIT(31) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_MP] = { 0x2c4, BIT(2) },
+ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI0] = { 0x2c4, BIT(8) },
+ [RST_BUS_CSI1] = { 0x2c4, BIT(9) },
+ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
+ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
+ [RST_BUS_DE] = { 0x2c4, BIT(12) },
+ [RST_BUS_TVE0] = { 0x2c4, BIT(13) },
+ [RST_BUS_TVE1] = { 0x2c4, BIT(14) },
+ [RST_BUS_TVE_TOP] = { 0x2c4, BIT(15) },
+ [RST_BUS_GMAC] = { 0x2c4, BIT(17) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_TVD0] = { 0x2c4, BIT(21) },
+ [RST_BUS_TVD1] = { 0x2c4, BIT(22) },
+ [RST_BUS_TVD2] = { 0x2c4, BIT(23) },
+ [RST_BUS_TVD3] = { 0x2c4, BIT(24) },
+ [RST_BUS_TVD_TOP] = { 0x2c4, BIT(25) },
+ [RST_BUS_TCON_LCD0] = { 0x2c4, BIT(26) },
+ [RST_BUS_TCON_LCD1] = { 0x2c4, BIT(27) },
+ [RST_BUS_TCON_TV0] = { 0x2c4, BIT(28) },
+ [RST_BUS_TCON_TV1] = { 0x2c4, BIT(29) },
+ [RST_BUS_TCON_TOP] = { 0x2c4, BIT(30) },
+ [RST_BUS_DBG] = { 0x2c4, BIT(31) },
+
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_AC97] = { 0x2d0, BIT(2) },
+ [RST_BUS_IR0] = { 0x2d0, BIT(6) },
+ [RST_BUS_IR1] = { 0x2d0, BIT(7) },
+ [RST_BUS_THS] = { 0x2d0, BIT(8) },
+ [RST_BUS_KEYPAD] = { 0x2d0, BIT(10) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+ [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_I2C3] = { 0x2d8, BIT(3) },
+ [RST_BUS_CAN] = { 0x2d8, BIT(4) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(5) },
+ [RST_BUS_PS20] = { 0x2d8, BIT(6) },
+ [RST_BUS_PS21] = { 0x2d8, BIT(7) },
+ [RST_BUS_I2C4] = { 0x2d8, BIT(15) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+ [RST_BUS_UART5] = { 0x2d8, BIT(21) },
+ [RST_BUS_UART6] = { 0x2d8, BIT(22) },
+ [RST_BUS_UART7] = { 0x2d8, BIT(23) },
+};
+
+static const struct sunxi_ccu_desc sun8i_r40_ccu_desc = {
+ .ccu_clks = sun8i_r40_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r40_ccu_clks),
+
+ .hw_clks = &sun8i_r40_hw_clks,
+
+ .resets = sun8i_r40_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_r40_ccu_resets),
+};
+
+static struct ccu_pll_nb sun8i_r40_pll_cpu_nb = {
+ .common = &pll_cpu_clk.common,
+ /* copy from pll_cpu_clk */
+ .enable = BIT(31),
+ .lock = BIT(28),
+};
+
+static struct ccu_mux_nb sun8i_r40_cpu_nb = {
+ .common = &cpu_clk.common,
+ .cm = &cpu_clk.mux,
+ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
+ .bypass_index = 1, /* index of 24 MHz oscillator */
+};
+
+static void __init sun8i_r40_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN8I_R40_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN8I_R40_PLL_AUDIO_REG);
+
+ /* Force PLL-MIPI to MIPI mode */
+ val = readl(reg + SUN8I_R40_PLL_MIPI_REG);
+ val &= ~BIT(16);
+ writel(val, reg + SUN8I_R40_PLL_MIPI_REG);
+
+ /* Force OHCI 12M parent to 12M divided from 48M */
+ val = readl(reg + SUN8I_R40_USB_CLK_REG);
+ val &= ~GENMASK(25, 20);
+ writel(val, reg + SUN8I_R40_USB_CLK_REG);
+
+ sunxi_ccu_probe(node, reg, &sun8i_r40_ccu_desc);
+
+ /* Gate then ungate PLL CPU after any rate changes */
+ ccu_pll_notifier_register(&sun8i_r40_pll_cpu_nb);
+
+ /* Reparent CPU during PLL CPU rate changes */
+ ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
+ &sun8i_r40_cpu_nb);
+}
+CLK_OF_DECLARE(sun8i_r40_ccu, "allwinner,sun8i-r40-ccu",
+ sun8i_r40_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
new file mode 100644
index 000000000000..0db8e1e97af8
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN8I_R40_H_
+#define _CCU_SUN8I_R40_H_
+
+#include <dt-bindings/clock/sun8i-r40-ccu.h>
+#include <dt-bindings/reset/sun8i-r40-ccu.h>
+
+#define CLK_OSC_12M 0
+#define CLK_PLL_CPU 1
+#define CLK_PLL_AUDIO_BASE 2
+#define CLK_PLL_AUDIO 3
+#define CLK_PLL_AUDIO_2X 4
+#define CLK_PLL_AUDIO_4X 5
+#define CLK_PLL_AUDIO_8X 6
+#define CLK_PLL_VIDEO0 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VE 9
+#define CLK_PLL_DDR0 10
+#define CLK_PLL_PERIPH0 11
+#define CLK_PLL_PERIPH0_SATA 12
+#define CLK_PLL_PERIPH0_2X 13
+#define CLK_PLL_PERIPH1 14
+#define CLK_PLL_PERIPH1_2X 15
+#define CLK_PLL_VIDEO1 16
+#define CLK_PLL_VIDEO1_2X 17
+#define CLK_PLL_SATA 18
+#define CLK_PLL_SATA_OUT 19
+#define CLK_PLL_GPU 20
+#define CLK_PLL_MIPI 21
+#define CLK_PLL_DE 22
+#define CLK_PLL_DDR1 23
+
+/* The CPU clock is exported */
+
+#define CLK_AXI 25
+#define CLK_AHB1 26
+#define CLK_APB1 27
+#define CLK_APB2 28
+
+/* All the bus gates are exported */
+
+/* The first bunch of module clocks are exported */
+
+#define CLK_DRAM 132
+
+/* All the DRAM gates are exported */
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS 155
+
+/* Another bunch of module clocks are exported */
+
+#define CLK_NUMBER (CLK_OUTB + 1)
+
+#endif /* _CCU_SUN8I_R40_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index a34a78d7fb28..621b1cd996db 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -575,8 +575,7 @@ static void __init sun8i_v3s_ccu_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
+ pr_err("%pOF: Could not map the clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index d6fdd7a789aa..cadd1a9f93b6 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -23,6 +23,10 @@
#define CCU_FEATURE_FIXED_POSTDIV BIT(3)
#define CCU_FEATURE_ALL_PREDIV BIT(4)
#define CCU_FEATURE_LOCK_REG BIT(5)
+#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6)
+
+/* MMC timing mode switch bit */
+#define CCU_MMC_NEW_TIMING_MODE BIT(30)
struct device_node;
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index c0e5c10d0091..baa3cf96507b 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -21,10 +21,18 @@ static unsigned long ccu_div_round_rate(struct ccu_mux_internal *mux,
{
struct ccu_div *cd = data;
- return divider_round_rate_parent(&cd->common.hw, parent,
+ if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= cd->fixed_post_div;
+
+ rate = divider_round_rate_parent(&cd->common.hw, parent,
rate, parent_rate,
cd->div.table, cd->div.width,
cd->div.flags);
+
+ if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= cd->fixed_post_div;
+
+ return rate;
}
static void ccu_div_disable(struct clk_hw *hw)
@@ -62,8 +70,13 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
parent_rate = ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1,
parent_rate);
- return divider_recalc_rate(hw, parent_rate, val, cd->div.table,
- cd->div.flags);
+ val = divider_recalc_rate(hw, parent_rate, val, cd->div.table,
+ cd->div.flags);
+
+ if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ val /= cd->fixed_post_div;
+
+ return val;
}
static int ccu_div_determine_rate(struct clk_hw *hw,
@@ -86,6 +99,9 @@ static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate = ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1,
parent_rate);
+ if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= cd->fixed_post_div;
+
val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
cd->div.flags);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 08d074451204..f3a5028dcd14 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -86,9 +86,10 @@ struct ccu_div_internal {
struct ccu_div {
u32 enable;
- struct ccu_div_internal div;
+ struct ccu_div_internal div;
struct ccu_mux_internal mux;
struct ccu_common common;
+ unsigned int fixed_post_div;
};
#define SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index 8b5eb7756bf7..d1d168d4c4f0 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -67,25 +67,25 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
{
u32 reg;
- printk("%s: Read fractional\n", clk_hw_get_name(&common->hw));
+ pr_debug("%s: Read fractional\n", clk_hw_get_name(&common->hw));
if (!(common->features & CCU_FEATURE_FRACTIONAL))
return 0;
- printk("%s: clock is fractional (rates %lu and %lu)\n",
- clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]);
+ pr_debug("%s: clock is fractional (rates %lu and %lu)\n",
+ clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]);
reg = readl(common->base + common->reg);
- printk("%s: clock reg is 0x%x (select is 0x%x)\n",
- clk_hw_get_name(&common->hw), reg, cf->select);
+ pr_debug("%s: clock reg is 0x%x (select is 0x%x)\n",
+ clk_hw_get_name(&common->hw), reg, cf->select);
return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
- unsigned long rate)
+ unsigned long rate, u32 lock)
{
unsigned long flags;
u32 reg, sel;
@@ -106,5 +106,7 @@ int ccu_frac_helper_set_rate(struct ccu_common *common,
writel(reg | sel, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
+ ccu_helper_wait_for_lock(common, lock);
+
return 0;
}
diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h
index 7b1ee380156f..efe2dd6bac01 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.h
+++ b/drivers/clk/sunxi-ng/ccu_frac.h
@@ -48,6 +48,6 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
- unsigned long rate);
+ unsigned long rate, u32 lock);
#endif /* _CCU_FRAC_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
new file mode 100644
index 000000000000..f9869f7353c0
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+/**
+ * sunxi_ccu_set_mmc_timing_mode: Configure the MMC clock timing mode
+ * @clk: clock to be configured
+ * @new_mode: true for new timing mode introduced in A83T and later
+ *
+ * Returns 0 on success, -ENOTSUPP if the clock does not support
+ * switching modes.
+ */
+int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ unsigned long flags;
+ u32 val;
+
+ if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH))
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(cm->lock, flags);
+
+ val = readl(cm->base + cm->reg);
+ if (new_mode)
+ val |= CCU_MMC_NEW_TIMING_MODE;
+ else
+ val &= ~CCU_MMC_NEW_TIMING_MODE;
+ writel(val, cm->base + cm->reg);
+
+ spin_unlock_irqrestore(cm->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
+
+/**
+ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode
+ * @clk: clock to query
+ *
+ * Returns 0 if the clock is in the old timing mode, > 0 if it is in
+ * the new timing mode, and -ENOTSUPP if the clock does not support
+ * this function.
+ */
+int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH))
+ return -ENOTSUPP;
+
+ return !!(readl(cm->base + cm->reg) & CCU_MMC_NEW_TIMING_MODE);
+}
+EXPORT_SYMBOL_GPL(sunxi_ccu_get_mmc_timing_mode);
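The two helpers above are intended to be called by the MMC host driver when it reconfigures the controller's timing. A minimal caller-side sketch, assuming a driver that already holds a struct clk for its module clock; the function name, variable names, and error handling below are illustrative, not part of this patch:

#include <linux/clk.h>
#include <linux/clk/sunxi-ng.h>

/* Hypothetical consumer: switch an MMC module clock to the new
 * timing mode and verify the switch. Only the two sunxi_ccu_*
 * calls are provided by this series. */
static int example_use_new_timing(struct clk *mmc_clk)
{
	int ret;

	ret = sunxi_ccu_set_mmc_timing_mode(mmc_clk, true);
	if (ret == -ENOTSUPP)
		return 0;	/* SoC without a switchable timing mode */
	if (ret)
		return ret;

	/* The getter returns > 0 in new mode, 0 in old mode */
	return sunxi_ccu_get_mmc_timing_mode(mmc_clk) > 0 ? 0 : -EIO;
}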
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index b917ad7a386c..688855e7dc8c 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -172,3 +172,83 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
+
+/*
+ * Support for MMC timing mode switching
+ *
+ * The MMC clocks on some SoCs support switching between old and
+ * new timing modes. A platform-specific API is provided to query
+ * and set the timing mode on supported SoCs.
+ *
+ * In addition, a special class of ccu_mp_ops is provided, which
+ * takes into account the timing mode switch. When the new timing
+ * mode is active, the clock output rate is halved. This new class
+ * is a wrapper around the generic ccu_mp_ops. When clock rates
+ * are passed through to ccu_mp_ops callbacks, they are doubled
+ * if the new timing mode bit is set, to account for the post
+ * divider. Conversely, when clock rates are passed back, they
+ * are halved if the mode bit is set.
+ */
+
+static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+
+ if (val & CCU_MMC_NEW_TIMING_MODE)
+ return rate / 2;
+ return rate;
+}
+
+static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+ int ret;
+
+ /* adjust the requested clock rate */
+ if (val & CCU_MMC_NEW_TIMING_MODE) {
+ req->rate *= 2;
+ req->min_rate *= 2;
+ req->max_rate *= 2;
+ }
+
+ ret = ccu_mp_determine_rate(hw, req);
+
+ /* re-adjust the requested clock rate back */
+ if (val & CCU_MMC_NEW_TIMING_MODE) {
+ req->rate /= 2;
+ req->min_rate /= 2;
+ req->max_rate /= 2;
+ }
+
+ return ret;
+}
+
+static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+
+ if (val & CCU_MMC_NEW_TIMING_MODE)
+ rate *= 2;
+
+ return ccu_mp_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops ccu_mp_mmc_ops = {
+ .disable = ccu_mp_disable,
+ .enable = ccu_mp_enable,
+ .is_enabled = ccu_mp_is_enabled,
+
+ .get_parent = ccu_mp_get_parent,
+ .set_parent = ccu_mp_set_parent,
+
+ .determine_rate = ccu_mp_mmc_determine_rate,
+ .recalc_rate = ccu_mp_mmc_recalc_rate,
+ .set_rate = ccu_mp_mmc_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 915625e97d98..aaef11d747ea 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -14,6 +14,7 @@
#ifndef _CCU_MP_H_
#define _CCU_MP_H_
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include "ccu_common.h"
@@ -74,4 +75,33 @@ static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw)
extern const struct clk_ops ccu_mp_ops;
+/*
+ * Special class of M-P clock that supports MMC timing modes
+ *
+ * Since the MMC clock registers all follow the same layout, we can
+ * simplify the macro for this particular case. In addition, as
+ * switching modes also affects the output clock rate, we need to
+ * have CLK_GET_RATE_NOCACHE for all these types of clocks.
+ */
+
+#define SUNXI_CCU_MP_MMC_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _flags) \
+ struct ccu_mp _struct = { \
+ .enable = BIT(31), \
+ .m = _SUNXI_CCU_DIV(0, 4), \
+ .p = _SUNXI_CCU_DIV(16, 2), \
+ .mux = _SUNXI_CCU_MUX(24, 2), \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_MMC_TIMING_SWITCH, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_mp_mmc_ops, \
+ CLK_GET_RATE_NOCACHE | \
+ _flags), \
+ } \
+ }
+
+extern const struct clk_ops ccu_mp_mmc_ops;
+
#endif /* _CCU_MP_H_ */
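For clarity, this is roughly what the macro produces for the A83T mmc2 conversion earlier in this series; a sketch of the expansion under the register layout stated above, not literal preprocessor output (the `static` comes from the call site):

/* static SUNXI_CCU_MP_MMC_WITH_MUX_GATE(mmc2_clk, "mmc2",
 *                                       mod0_default_parents, 0x090, 0);
 * expands to approximately: */
static struct ccu_mp mmc2_clk = {
	.enable	= BIT(31),
	.m	= _SUNXI_CCU_DIV(0, 4),		/* M divider, bits [3:0] */
	.p	= _SUNXI_CCU_DIV(16, 2),	/* P divider, bits [17:16] */
	.mux	= _SUNXI_CCU_MUX(24, 2),	/* parent mux, bits [25:24] */
	.common	= {
		.reg		= 0x090,
		.features	= CCU_FEATURE_MMC_TIMING_SWITCH,
		.hw.init	= CLK_HW_INIT_PARENTS("mmc2",
						      mod0_default_parents,
						      &ccu_mp_mmc_ops,
						      CLK_GET_RATE_NOCACHE),
	},
};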
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 20d0300867f2..12e0783caee6 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -111,10 +111,14 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
- if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate))
- return ccu_frac_helper_set_rate(&cm->common, &cm->frac, rate);
- else
+ if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
+ ccu_frac_helper_enable(&cm->common, &cm->frac);
+
+ return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
+ rate, cm->lock);
+ } else {
ccu_frac_helper_disable(&cm->common, &cm->frac);
+ }
parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
parent_rate);
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 44b16dc8fea6..841840e35e61 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -75,7 +75,7 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
- unsigned long n, m, k;
+ unsigned long n, m, k, rate;
u32 reg;
reg = readl(nkm->common.base + nkm->common.reg);
@@ -98,7 +98,12 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
if (!m)
m++;
- return parent_rate * n * k / m;
+ rate = parent_rate * n * k / m;
+
+ if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nkm->fixed_post_div;
+
+ return rate;
}
static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
@@ -117,9 +122,17 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
_nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
+ if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= nkm->fixed_post_div;
+
ccu_nkm_find_best(*parent_rate, rate, &_nkm);
- return *parent_rate * _nkm.n * _nkm.k / _nkm.m;
+ rate = *parent_rate * _nkm.n * _nkm.k / _nkm.m;
+
+ if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nkm->fixed_post_div;
+
+ return rate;
}
static int ccu_nkm_determine_rate(struct clk_hw *hw,
@@ -139,6 +152,9 @@ static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
+ if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= nkm->fixed_post_div;
+
_nkm.min_n = nkm->n.min ?: 1;
_nkm.max_n = nkm->n.max ?: 1 << nkm->n.width;
_nkm.min_k = nkm->k.min ?: 1;
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
index 34580894f4d1..cc6efb70a102 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.h
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -34,6 +34,8 @@ struct ccu_nkm {
struct ccu_div_internal m;
struct ccu_mux_internal mux;
+ unsigned int fixed_post_div;
+
struct ccu_common common;
};
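
A worked example of the fixed post-divider handling, with illustrative numbers (a sketch, not from a real clock tree):

/* 24 MHz parent, N=10, K=2, M=4, fixed_post_div=2. */
unsigned long long rate = 24000000ULL * 10 * 2 / 4;	/* 120 MHz pre-divider */
rate /= 2;						/* 60 MHz at the output */

round_rate() and set_rate() scale the requested rate up by the same factor before searching for dividers, so both directions of the conversion stay consistent.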
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 5e5e90a4a50c..a32158e8f2e3 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -117,10 +117,23 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long flags;
u32 reg;
- if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
- return ccu_frac_helper_set_rate(&nm->common, &nm->frac, rate);
- else
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
+ spin_lock_irqsave(nm->common.lock, flags);
+
+ /* most SoCs require M to be 0 if fractional mode is used */
+ reg = readl(nm->common.base + nm->common.reg);
+ reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
+ writel(reg, nm->common.base + nm->common.reg);
+
+ spin_unlock_irqrestore(nm->common.lock, flags);
+
+ ccu_frac_helper_enable(&nm->common, &nm->frac);
+
+ return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
+ rate, nm->lock);
+ } else {
ccu_frac_helper_disable(&nm->common, &nm->frac);
+ }
_nm.min_n = nm->n.min ?: 1;
_nm.max_n = nm->n.max ?: 1 << nm->n.width;
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 6c31d48783a7..1dc4e98ea802 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -8,6 +8,7 @@
* the License, or (at your option) any later version.
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/reset-controller.h>
@@ -49,7 +50,18 @@ static int ccu_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
+static int ccu_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ ccu_reset_assert(rcdev, id);
+ udelay(10);
+ ccu_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
const struct reset_control_ops ccu_reset_ops = {
.assert = ccu_reset_assert,
.deassert = ccu_reset_deassert,
+ .reset = ccu_reset_reset,
};
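
With a .reset callback wired up, consumers can use the one-shot helper instead of pairing assert/deassert by hand; a hedged usage sketch with a hypothetical consumer device:

#include <linux/reset.h>

static int example_reset_block(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Asserts, waits ~10us inside the CCU driver, then deasserts. */
	return reset_control_reset(rst);
}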
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index 63fdb790df29..bee305bdddbe 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -78,6 +78,10 @@ static void __init sun8i_h3_bus_gates_init(struct device_node *node)
clk_parent = APB1;
else if (index >= 96 && index <= 127)
clk_parent = APB2;
+ else {
+ WARN_ON(true);
+ continue;
+ }
clk_reg = reg + 4 * (index / 32);
clk_bit = index % 32;
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index f2c9274b8bd5..aa4add580516 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -666,15 +666,14 @@ static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
reg = of_iomap(node, 0);
if (!reg) {
- pr_err("Could not map registers for mux-clk: %s\n",
- of_node_full_name(node));
+ pr_err("Could not map registers for mux-clk: %pOF\n", node);
return NULL;
}
i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
if (of_property_read_string(node, "clock-output-names", &clk_name)) {
- pr_err("%s: could not read clock-output-names from \"%s\"\n",
- __func__, of_node_full_name(node));
+ pr_err("%s: could not read clock-output-names from \"%pOF\"\n",
+ __func__, node);
goto out_unmap;
}
@@ -797,16 +796,15 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
reg = of_iomap(node, 0);
if (!reg) {
- pr_err("Could not map registers for mux-clk: %s\n",
- of_node_full_name(node));
+ pr_err("Could not map registers for mux-clk: %pOF\n", node);
return;
}
clk_parent = of_clk_get_parent_name(node, 0);
if (of_property_read_string(node, "clock-output-names", &clk_name)) {
- pr_err("%s: could not read clock-output-names from \"%s\"\n",
- __func__, of_node_full_name(node));
+ pr_err("%s: could not read clock-output-names from \"%pOF\"\n",
+ __func__, node);
goto out_unmap;
}
@@ -1010,8 +1008,7 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
reg = of_iomap(node, 0);
if (!reg) {
- pr_err("Could not map registers for divs-clk: %s\n",
- of_node_full_name(node));
+ pr_err("Could not map registers for divs-clk: %pOF\n", node);
return NULL;
}
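
The %pOF conversions in this file (and in the files below) use the printk extension that formats a struct device_node * as its full device tree path, making the explicit of_node_full_name() calls unnecessary; a minimal sketch:

/* Logs the node's full DT path, e.g. "/soc/clock@1c20000". */
pr_info("setting up %pOF\n", node);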
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 74e7544f861b..11a5066e5c27 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -378,7 +378,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
err = of_property_read_u32(node, "clock-frequency", &tmp);
if (err) {
- pr_err("timing %s: failed to read rate\n", node->full_name);
+ pr_err("timing %pOF: failed to read rate\n", node);
return err;
}
@@ -386,8 +386,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
if (err) {
- pr_err("timing %s: failed to read parent rate\n",
- node->full_name);
+ pr_err("timing %pOF: failed to read parent rate\n", node);
return err;
}
@@ -395,8 +394,7 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
timing->parent = of_clk_get_by_name(node, "emc-parent");
if (IS_ERR(timing->parent)) {
- pr_err("timing %s: failed to get parent clock\n",
- node->full_name);
+ pr_err("timing %pOF: failed to get parent clock\n", node);
return PTR_ERR(timing->parent);
}
@@ -409,8 +407,8 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
}
}
if (timing->parent_index == 0xff) {
- pr_err("timing %s: %s is not a valid parent\n",
- node->full_name, __clk_get_name(timing->parent));
+ pr_err("timing %pOF: %s is not a valid parent\n",
+ node, __clk_get_name(timing->parent));
clk_put(timing->parent);
return -EINVAL;
}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 159a854779e6..7c369e21c91c 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -363,7 +363,7 @@ static void _clk_pll_enable(struct clk_hw *hw)
val = pll_readl(pll->params->iddq_reg, pll);
val &= ~BIT(pll->params->iddq_bit_idx);
pll_writel(val, pll->params->iddq_reg, pll);
- udelay(2);
+ udelay(5);
}
if (pll->params->reset_reg) {
@@ -418,6 +418,26 @@ static void _clk_pll_disable(struct clk_hw *hw)
}
}
+static void pll_clk_start_ss(struct tegra_clk_pll *pll)
+{
+ if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) {
+ u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll);
+
+ val |= pll->params->ssc_ctrl_en_mask;
+ pll_writel(val, pll->params->ssc_ctrl_reg, pll);
+ }
+}
+
+static void pll_clk_stop_ss(struct tegra_clk_pll *pll)
+{
+ if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) {
+ u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll);
+
+ val &= ~pll->params->ssc_ctrl_en_mask;
+ pll_writel(val, pll->params->ssc_ctrl_reg, pll);
+ }
+}
+
static int clk_pll_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
@@ -431,6 +451,8 @@ static int clk_pll_enable(struct clk_hw *hw)
ret = clk_pll_wait_for_lock(pll);
+ pll_clk_start_ss(pll);
+
if (pll->lock)
spin_unlock_irqrestore(pll->lock, flags);
@@ -445,6 +467,8 @@ static void clk_pll_disable(struct clk_hw *hw)
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
+ pll_clk_stop_ss(pll);
+
_clk_pll_disable(hw);
if (pll->lock)
@@ -666,6 +690,8 @@ static void _get_pll_mnp(struct tegra_clk_pll *pll,
struct tegra_clk_pll_params *params = pll->params;
struct div_nmp *div_nmp = params->div_nmp;
+ *cfg = (struct tegra_clk_pll_freq_table) { };
+
if ((params->flags & (TEGRA_PLLM | TEGRA_PLLMB)) &&
(pll_override_readl(PMC_PLLP_WB0_OVERRIDE, pll) &
PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)) {
@@ -716,26 +742,6 @@ static void _update_pll_cpcon(struct tegra_clk_pll *pll,
pll_writel_misc(val, pll);
}
-static void pll_clk_start_ss(struct tegra_clk_pll *pll)
-{
- if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) {
- u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll);
-
- val |= pll->params->ssc_ctrl_en_mask;
- pll_writel(val, pll->params->ssc_ctrl_reg, pll);
- }
-}
-
-static void pll_clk_stop_ss(struct tegra_clk_pll *pll)
-{
- if (pll->params->defaults_set && pll->params->ssc_ctrl_reg) {
- u32 val = pll_readl(pll->params->ssc_ctrl_reg, pll);
-
- val &= ~pll->params->ssc_ctrl_en_mask;
- pll_writel(val, pll->params->ssc_ctrl_reg, pll);
- }
-}
-
static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
unsigned long rate)
{
@@ -2251,7 +2257,7 @@ tegra_clk_register_pllu_tegra114(const char *name, const char *parent_name,
}
#endif
-#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC) || defined(CONFIG_ARCH_TEGRA_210_SOC)
static const struct clk_ops tegra_clk_pllss_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_enable,
@@ -2349,7 +2355,6 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name,
struct tegra_clk_pll_params *pll_params,
spinlock_t *lock, unsigned long parent_rate)
{
- u32 val;
struct tegra_clk_pll *pll;
struct clk *clk;
@@ -2363,26 +2368,8 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name,
if (IS_ERR(pll))
return ERR_CAST(pll);
- /* program minimum rate by default */
-
- val = pll_readl_base(pll);
- if (val & PLL_BASE_ENABLE)
- WARN_ON(readl_relaxed(clk_base + pll_params->iddq_reg) &
- BIT(pll_params->iddq_bit_idx));
- else {
- val = 0x4 << divm_shift(pll);
- val |= 0x41 << divn_shift(pll);
- pll_writel_base(val, pll);
- }
-
- /* disable lock override */
-
- val = pll_readl_misc(pll);
- val &= ~BIT(29);
- pll_writel_misc(val, pll);
-
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
- &tegra_clk_pllre_ops);
+ &tegra_clk_pll_ops);
if (IS_ERR(clk))
kfree(pll);
@@ -2604,46 +2591,6 @@ struct clk *tegra_clk_register_pllc_tegra210(const char *name,
return clk;
}
-struct clk *tegra_clk_register_pllxc_tegra210(const char *name,
- const char *parent_name, void __iomem *clk_base,
- void __iomem *pmc, unsigned long flags,
- struct tegra_clk_pll_params *pll_params,
- spinlock_t *lock)
-{
- struct tegra_clk_pll *pll;
- struct clk *clk, *parent;
- unsigned long parent_rate;
-
- parent = __clk_lookup(parent_name);
- if (!parent) {
- WARN(1, "parent clk %s of %s must be registered first\n",
- name, parent_name);
- return ERR_PTR(-EINVAL);
- }
-
- if (!pll_params->pdiv_tohw)
- return ERR_PTR(-EINVAL);
-
- parent_rate = clk_get_rate(parent);
-
- pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
-
- if (pll_params->adjust_vco)
- pll_params->vco_min = pll_params->adjust_vco(pll_params,
- parent_rate);
-
- pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
- if (IS_ERR(pll))
- return ERR_CAST(pll);
-
- clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
- &tegra_clk_pll_ops);
- if (IS_ERR(clk))
- kfree(pll);
-
- return clk;
-}
-
struct clk *tegra_clk_register_pllss_tegra210(const char *name,
const char *parent_name, void __iomem *clk_base,
unsigned long flags,
@@ -2652,10 +2599,8 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name,
{
struct tegra_clk_pll *pll;
struct clk *clk, *parent;
- struct tegra_clk_pll_freq_table cfg;
unsigned long parent_rate;
u32 val;
- int i;
if (!pll_params->div_nmp)
return ERR_PTR(-EINVAL);
@@ -2667,13 +2612,11 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name,
return ERR_PTR(-EINVAL);
}
- pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
- if (IS_ERR(pll))
- return ERR_CAST(pll);
-
- val = pll_readl_base(pll);
- val &= ~PLLSS_REF_SRC_SEL_MASK;
- pll_writel_base(val, pll);
+ val = readl_relaxed(clk_base + pll_params->base_reg);
+ if (val & PLLSS_REF_SRC_SEL_MASK) {
+ WARN(1, "not supported reference clock for %s\n", name);
+ return ERR_PTR(-EINVAL);
+ }
parent_rate = clk_get_rate(parent);
@@ -2683,36 +2626,10 @@ struct clk *tegra_clk_register_pllss_tegra210(const char *name,
pll_params->vco_min = pll_params->adjust_vco(pll_params,
parent_rate);
- /* initialize PLL to minimum rate */
-
- cfg.m = _pll_fixed_mdiv(pll_params, parent_rate);
- cfg.n = cfg.m * pll_params->vco_min / parent_rate;
-
- for (i = 0; pll_params->pdiv_tohw[i].pdiv; i++)
- ;
- if (!i) {
- kfree(pll);
- return ERR_PTR(-EINVAL);
- }
-
- cfg.p = pll_params->pdiv_tohw[i-1].hw_val;
-
- _update_pll_mnp(pll, &cfg);
-
- pll_writel_misc(PLLSS_MISC_DEFAULT, pll);
-
- val = pll_readl_base(pll);
- if (val & PLL_BASE_ENABLE) {
- if (val & BIT(pll_params->iddq_bit_idx)) {
- WARN(1, "%s is on but IDDQ set\n", name);
- kfree(pll);
- return ERR_PTR(-EINVAL);
- }
- } else
- val |= BIT(pll_params->iddq_bit_idx);
-
- val &= ~PLLSS_LOCK_OVERRIDE;
- pll_writel_base(val, pll);
+ pll_params->flags |= TEGRA_PLL_BYPASS;
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
&tegra_clk_pll_ops);
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 294bfe40a4f5..848255cc0209 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -216,7 +216,8 @@
_clk_num, _clk_id) \
TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
30, MASK(2), 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP,\
- _clk_num, 0, _clk_id, _parents##_idx, 0, NULL)
+ _clk_num, TEGRA_PERIPH_ON_APB, _clk_id, \
+ _parents##_idx, 0, NULL)
#define XUSB(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 474de0f0c26d..4f6fd307cb70 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -232,8 +232,15 @@ static void __init tegra_super_clk_init(void __iomem *clk_base,
if (!dt_clk)
return;
- clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
- pmc_base, CLK_IGNORE_UNUSED, params, NULL);
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ if (gen_info->gen == gen5)
+ clk = tegra_clk_register_pllc_tegra210("pll_x", "pll_ref",
+ clk_base, pmc_base, CLK_IGNORE_UNUSED, params, NULL);
+ else
+#endif
+ clk = tegra_clk_register_pllxc("pll_x", "pll_ref", clk_base,
+ pmc_base, CLK_IGNORE_UNUSED, params, NULL);
+
*dt_clk = clk;
/* PLLX_OUT0 */
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 1024e853ea65..6d7a613f2656 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -146,7 +146,7 @@
#define PLLD_SDM_EN_MASK BIT(16)
#define PLLD2_SDM_EN_MASK BIT(31)
-#define PLLD2_SSC_EN_MASK BIT(30)
+#define PLLD2_SSC_EN_MASK 0
#define PLLDP_SS_CFG 0x598
#define PLLDP_SDM_EN_MASK BIT(31)
@@ -241,6 +241,9 @@
#define PLL_SDM_COEFF BIT(13)
#define sdin_din_to_data(din) ((u16)((din) ? : 0xFFFFU))
#define sdin_data_to_din(dat) (((dat) == 0xFFFFU) ? 0 : (s16)dat)
+/* This macro returns the effective ndiv, scaled to the SDM range */
+#define sdin_get_n_eff(cfg) ((cfg)->n * PLL_SDM_COEFF + ((cfg)->sdm_data ? \
+ (PLL_SDM_COEFF/2 + sdin_data_to_din((cfg)->sdm_data)) : 0))
/* Tegra CPU clock and reset control regs */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
@@ -715,8 +718,6 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
plldss->params->defaults_set = true;
if (val & PLL_ENABLE) {
- pr_warn("%s already enabled. Postponing set full defaults\n",
- pll_name);
/*
* PLL is ON: check if defaults already set, then set those
@@ -755,6 +756,10 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
(~PLLDSS_MISC1_CFG_EN_SDM));
}
+ if (!plldss->params->defaults_set)
+ pr_warn("%s already enabled. Postponing set full defaults\n",
+ pll_name);
+
/* Enable lock detect */
if (val & PLLDSS_BASE_LOCK_OVERRIDE) {
val &= ~PLLDSS_BASE_LOCK_OVERRIDE;
@@ -1288,8 +1293,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
s -= PLL_SDM_COEFF / 2;
cfg->sdm_data = sdin_din_to_data(s);
}
- cfg->output_rate *= cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 +
- sdin_data_to_din(cfg->sdm_data);
+ cfg->output_rate *= sdin_get_n_eff(cfg);
cfg->output_rate /= p * cfg->m * PLL_SDM_COEFF;
} else {
cfg->output_rate *= cfg->n;
@@ -1314,8 +1318,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
*/
static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
{
- cfg->n = cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 +
- sdin_data_to_din(cfg->sdm_data);
+ cfg->n = sdin_get_n_eff(cfg);
cfg->m *= PLL_SDM_COEFF;
}
@@ -2204,7 +2207,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true },
[tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, },
[tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true },
- [tegra_clk_vfir] = { .dt_id = TEGRA210_CLK_VFIR, .present = true },
[tegra_clk_spdif_in_8] = { .dt_id = TEGRA210_CLK_SPDIF_IN, .present = true },
[tegra_clk_spdif_out] = { .dt_id = TEGRA210_CLK_SPDIF_OUT, .present = true },
[tegra_clk_vi_10] = { .dt_id = TEGRA210_CLK_VI, .present = true },
@@ -2470,15 +2472,14 @@ static void tegra210_utmi_param_configure(void)
reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
-
reg |=
UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].active_delay_count);
writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
/* Program UTMIP PLL delay and oscillator frequency counts */
reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
reg |=
UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].enable_delay_count);
@@ -2494,7 +2495,8 @@ static void tegra210_utmi_param_configure(void)
reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
- udelay(1);
+
+ udelay(20);
/* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */
reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
@@ -2552,6 +2554,7 @@ static int tegra210_enable_pllu(void)
reg = readl_relaxed(clk_base + pllu.params->ext_misc_reg[0]);
reg &= ~BIT(pllu.params->iddq_bit_idx);
writel_relaxed(reg, clk_base + pllu.params->ext_misc_reg[0]);
+ udelay(5);
reg = readl_relaxed(clk_base + PLLU_BASE);
reg &= ~GENMASK(20, 0);
@@ -2559,6 +2562,7 @@ static int tegra210_enable_pllu(void)
reg |= fentry->n << 8;
reg |= fentry->p << 16;
writel(reg, clk_base + PLLU_BASE);
+ udelay(1);
reg |= PLL_ENABLE;
writel(reg, clk_base + PLLU_BASE);
@@ -2699,7 +2703,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
struct clk *clk;
/* PLLC */
- clk = tegra_clk_register_pllxc_tegra210("pll_c", "pll_ref", clk_base,
+ clk = tegra_clk_register_pllc_tegra210("pll_c", "pll_ref", clk_base,
pmc, 0, &pll_c_params, NULL);
if (!WARN_ON(IS_ERR(clk)))
clk_register_clkdev(clk, "pll_c", NULL);
@@ -2798,14 +2802,14 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
/* PLLU_60M */
clk = clk_register_gate(NULL, "pll_u_60M", "pll_u_out2",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
- 23, 0, NULL);
+ 23, 0, &pll_u_lock);
clk_register_clkdev(clk, "pll_u_60M", NULL);
clks[TEGRA210_CLK_PLL_U_60M] = clk;
/* PLLU_48M */
clk = clk_register_gate(NULL, "pll_u_48M", "pll_u_out1",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
- 25, 0, NULL);
+ 25, 0, &pll_u_lock);
clk_register_clkdev(clk, "pll_u_48M", NULL);
clks[TEGRA210_CLK_PLL_U_48M] = clk;
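
A worked example of the sdin_get_n_eff() macro introduced above, with PLL_SDM_COEFF = BIT(13) = 8192 and illustrative values: for n = 50 and sdm_data encoding din = -1024, n_eff = 50 * 8192 + (4096 - 1024) = 412672, giving a fractional feedback ratio of 412672 / 8192 = 50.375. tegra210_clk_pll_set_gain() multiplies m by the same 8192, so the overall ratio is preserved.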
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 945b07093afa..872f1189ad7f 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -362,12 +362,6 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
struct tegra_clk_pll_params *pll_params,
spinlock_t *lock);
-struct clk *tegra_clk_register_pllxc_tegra210(const char *name,
- const char *parent_name, void __iomem *clk_base,
- void __iomem *pmc, unsigned long flags,
- struct tegra_clk_pll_params *pll_params,
- spinlock_t *lock);
-
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
unsigned long flags,
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 255cafb18336..d6036c788fab 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -222,7 +222,7 @@ static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock,
/* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */
postfix = strrchr(name, '.');
- if (strlen(postfix) > 1) {
+ if (postfix && strlen(postfix) > 1) {
if (strlen(postfix) > ADPLL_MAX_CON_ID)
dev_warn(d->dev, "clock %s con_id lookup may fail\n",
name);
@@ -486,7 +486,7 @@ static u8 ti_adpll_get_parent(struct clk_hw *hw)
return 0;
}
-static struct clk_ops ti_adpll_ops = {
+static const struct clk_ops ti_adpll_ops = {
.prepare = ti_adpll_prepare,
.unprepare = ti_adpll_unprepare,
.is_prepared = ti_adpll_is_prepared,
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 06f486b3488c..83b148f8037c 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -304,7 +304,7 @@ static void omap2_apll_disable(struct clk_hw *hw)
ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}
-static struct clk_ops omap2_apll_ops = {
+static const struct clk_ops omap2_apll_ops = {
.enable = &omap2_apll_enable,
.disable = &omap2_apll_disable,
.is_enabled = &omap2_apll_is_enabled,
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index fbedc6a9fed0..07a805125e98 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -138,8 +138,8 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
for (i = 0; i < num_clks; i++) {
clk = of_clk_get(node, i);
if (IS_ERR(clk)) {
- pr_err("%s: Failed get %s' clock nr %d (%ld)\n",
- __func__, node->full_name, i, PTR_ERR(clk));
+ pr_err("%s: Failed get %pOF' clock nr %d (%ld)\n",
+ __func__, node, i, PTR_ERR(clk));
continue;
}
clk_hw = __clk_get_hw(clk);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 66a0d0ed8b55..071af44b1ba8 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -268,7 +268,7 @@ static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops ti_fapll_ops = {
+static const struct clk_ops ti_fapll_ops = {
.enable = ti_fapll_enable,
.disable = ti_fapll_disable,
.is_enabled = ti_fapll_is_enabled,
@@ -478,7 +478,7 @@ static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops ti_fapll_synt_ops = {
+static const struct clk_ops ti_fapll_synt_ops = {
.enable = ti_fapll_synth_enable,
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 2cf386347f0c..e09f3dd46318 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -111,10 +111,6 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
- .compatible = "socionext,uniphier-sld3-clock",
- .data = uniphier_sld3_sys_clk_data,
- },
- {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -142,22 +138,22 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock, SD clock */
{
- .compatible = "socionext,uniphier-sld3-mio-clock",
- .data = uniphier_sld3_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs3-clock",
+ .data = uniphier_pxs3_sys_clk_data,
},
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-ld4-mio-clock",
- .data = uniphier_sld3_mio_clk_data,
+ .data = uniphier_ld4_mio_clk_data,
},
{
.compatible = "socionext,uniphier-pro4-mio-clock",
- .data = uniphier_sld3_mio_clk_data,
+ .data = uniphier_ld4_mio_clk_data,
},
{
.compatible = "socionext,uniphier-sld8-mio-clock",
- .data = uniphier_sld3_mio_clk_data,
+ .data = uniphier_ld4_mio_clk_data,
},
{
.compatible = "socionext,uniphier-pro5-sd-clock",
@@ -169,12 +165,16 @@ static const struct of_device_id uniphier_clk_match[] = {
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
- .data = uniphier_sld3_mio_clk_data,
+ .data = uniphier_ld4_mio_clk_data,
},
{
.compatible = "socionext,uniphier-ld20-sd-clock",
.data = uniphier_pro5_sd_clk_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
+ },
/* Peripheral clock */
{
.compatible = "socionext,uniphier-ld4-peri-clock",
@@ -204,6 +204,10 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-peri-clock",
.data = uniphier_pro4_peri_clk_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-peri-clock",
+ .data = uniphier_pro4_peri_clk_data,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 218d20f099ce..16e4d303f535 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -76,7 +76,7 @@
#define UNIPHIER_MIO_CLK_DMAC(idx) \
UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25)
-const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_ld4_mio_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
@@ -85,11 +85,9 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
UNIPHIER_MIO_CLK_USB2(8, 0),
UNIPHIER_MIO_CLK_USB2(9, 1),
UNIPHIER_MIO_CLK_USB2(10, 2),
- UNIPHIER_MIO_CLK_USB2(11, 3),
UNIPHIER_MIO_CLK_USB2_PHY(12, 0),
UNIPHIER_MIO_CLK_USB2_PHY(13, 1),
UNIPHIER_MIO_CLK_USB2_PHY(14, 2),
- UNIPHIER_MIO_CLK_USB2_PHY(15, 3),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index ad0218182a9f..0e396f3da526 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -17,7 +17,7 @@
#include "clk-uniphier.h"
-#define UNIPHIER_SLD3_SYS_CLK_SD \
+#define UNIPHIER_LD4_SYS_CLK_SD \
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 8), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "vpll27a", 1, 2)
@@ -30,7 +30,7 @@
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
/* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */
-#define UNIPHIER_SLD3_SYS_CLK_NAND(idx) \
+#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \
UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \
UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
@@ -45,7 +45,7 @@
#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \
UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2)
-#define UNIPHIER_SLD3_SYS_CLK_STDMAC(idx) \
+#define UNIPHIER_LD4_SYS_CLK_STDMAC(idx) \
UNIPHIER_CLK_GATE("stdmac", (idx), NULL, 0x2104, 10)
#define UNIPHIER_LD11_SYS_CLK_STDMAC(idx) \
@@ -57,19 +57,23 @@
#define UNIPHIER_PRO4_SYS_CLK_USB3(idx, ch) \
UNIPHIER_CLK_GATE("usb3" #ch, (idx), NULL, 0x2104, 16 + (ch))
-const struct uniphier_clk_data uniphier_sld3_sys_clk_data[] = {
- UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */
- UNIPHIER_CLK_FACTOR("upll", -1, "ref", 6000, 512), /* 288 MHz */
- UNIPHIER_CLK_FACTOR("a2pll", -1, "ref", 24, 1), /* 589.824 MHz */
- UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
- UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
- UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
- UNIPHIER_SLD3_SYS_CLK_SD,
- UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8),
- { /* sentinel */ }
-};
+#define UNIPHIER_LD11_SYS_CLK_AIO(idx) \
+ UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 10), \
+ UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2108, 0)
+
+#define UNIPHIER_LD11_SYS_CLK_EVEA(idx) \
+ UNIPHIER_CLK_FACTOR("evea-io100m", -1, "spll", 1, 20), \
+ UNIPHIER_CLK_GATE("evea", (idx), "evea-io100m", 0x2108, 1)
+
+#define UNIPHIER_LD11_SYS_CLK_EXIV(idx) \
+ UNIPHIER_CLK_FACTOR("exiv-io200m", -1, "spll", 1, 10), \
+ UNIPHIER_CLK_GATE("exiv", (idx), "exiv-io200m", 0x2110, 2)
+
+#define UNIPHIER_PRO4_SYS_CLK_ETHER(idx) \
+ UNIPHIER_CLK_GATE("ether", (idx), NULL, 0x2104, 12)
+
+#define UNIPHIER_LD11_SYS_CLK_ETHER(idx) \
+ UNIPHIER_CLK_GATE("ether", (idx), NULL, 0x210c, 6)
const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 65, 1), /* 1597.44 MHz */
@@ -78,10 +82,10 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
- UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
+ UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
{ /* sentinel */ }
};
@@ -92,10 +96,11 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
- UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_PRO4_SYS_CLK_ETHER(6),
+ UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
@@ -108,10 +113,10 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
- UNIPHIER_SLD3_SYS_CLK_SD,
+ UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
+ UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
{ /* sentinel */ }
};
@@ -123,7 +128,7 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
UNIPHIER_PRO5_SYS_CLK_SD,
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC */
+ UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
@@ -136,7 +141,8 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
UNIPHIER_PRO5_SYS_CLK_SD,
- UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, RLE */
+ UNIPHIER_PRO4_SYS_CLK_ETHER(6),
+ UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, RLE */
/* GIO is always clock-enabled: no function for 0x2104 bit6 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
@@ -156,8 +162,12 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
UNIPHIER_LD11_SYS_CLK_NAND(2),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
+ UNIPHIER_LD11_SYS_CLK_ETHER(6),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC, MIO */
UNIPHIER_CLK_FACTOR("usb2", -1, "ref", 24, 25),
+ UNIPHIER_LD11_SYS_CLK_AIO(40),
+ UNIPHIER_LD11_SYS_CLK_EVEA(41),
+ UNIPHIER_LD11_SYS_CLK_EXIV(42),
/* CPU gears */
UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
UNIPHIER_CLK_DIV4("mpll", 2, 3, 4, 8),
@@ -185,6 +195,7 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD20_SYS_CLK_SD,
+ UNIPHIER_LD11_SYS_CLK_ETHER(6),
UNIPHIER_LD11_SYS_CLK_STDMAC(8), /* HSC */
/* GIO is always clock-enabled: no function for 0x210c bit5 */
/*
@@ -194,6 +205,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_LD11_SYS_CLK_AIO(40),
+ UNIPHIER_LD11_SYS_CLK_EVEA(41),
+ UNIPHIER_LD11_SYS_CLK_EXIV(42),
/* CPU gears */
UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
@@ -209,3 +223,33 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
"spll/4", "spll/8", "s2pll/4", "s2pll/8"),
{ /* sentinel */ }
};
+
+const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
+ UNIPHIER_CLK_FACTOR("cpll", -1, "ref", 104, 1), /* ARM: 2600 MHz */
+ UNIPHIER_CLK_FACTOR("spll", -1, "ref", 80, 1), /* 2000 MHz */
+ UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2400 MHz */
+ UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
+ UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_LD20_SYS_CLK_SD,
+ UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_LD11_SYS_CLK_EMMC(4),
+ UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x2104, 4), /* =GIO0 */
+ UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x2104, 5), /* =GIO1 */
+ UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x2104, 6), /* =GIO1-1 */
+ UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16),
+ UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18),
+ UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
+ UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
+ UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ /* CPU gears */
+ UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
+ UNIPHIER_CLK_DIV4("s2pll", 2, 3, 4, 8),
+ UNIPHIER_CLK_CPUGEAR("cpu-ca53", 33, 0x8080, 0xf, 8,
+ "cpll/2", "spll/2", "cpll/3", "spll/3",
+ "spll/4", "spll/8", "cpll/4", "cpll/8"),
+ UNIPHIER_CLK_CPUGEAR("cpu-ipp", 34, 0x8100, 0xf, 8,
+ "s2pll/2", "spll/2", "s2pll/3", "spll/3",
+ "spll/4", "spll/8", "s2pll/4", "s2pll/8"),
+ { /* sentinel */ }
+};
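
For orientation, the fixed-factor entries encode simple ratios from the reference clock; assuming the 25 MHz reference implied by the cpll comment (2600 / 104 = 25), the PXs3 rates work out as in this sketch:

/* Illustrative, assuming a 25 MHz reference oscillator. */
unsigned long long ref  = 25000000ULL;
unsigned long long cpll = ref * 104;	/* 2600 MHz, ARM cores */
unsigned long long spll = ref * 80;	/* 2000 MHz, system PLL */
unsigned long long i2c  = spll / 40;	/* 50 MHz */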
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 01c16ecec48f..d10a009ada96 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -147,7 +147,6 @@ struct clk_hw *uniphier_clk_register_mux(struct device *dev,
const char *name,
const struct uniphier_clk_mux_data *data);
-extern const struct uniphier_clk_data uniphier_sld3_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld8_sys_clk_data[];
@@ -155,7 +154,8 @@ extern const struct uniphier_clk_data uniphier_pro5_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
-extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[];
+extern const struct uniphier_clk_data uniphier_ld4_mio_clk_data[];
extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index 0e950769ed03..f50592775c9d 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -79,13 +79,13 @@ static int clk_prcc_is_enabled(struct clk_hw *hw)
return clk->is_enabled;
}
-static struct clk_ops clk_prcc_pclk_ops = {
+static const struct clk_ops clk_prcc_pclk_ops = {
.enable = clk_prcc_pclk_enable,
.disable = clk_prcc_pclk_disable,
.is_enabled = clk_prcc_is_enabled,
};
-static struct clk_ops clk_prcc_kclk_ops = {
+static const struct clk_ops clk_prcc_kclk_ops = {
.enable = clk_prcc_kclk_enable,
.disable = clk_prcc_kclk_disable,
.is_enabled = clk_prcc_is_enabled,
@@ -96,7 +96,7 @@ static struct clk *clk_reg_prcc(const char *name,
resource_size_t phy_base,
u32 cg_sel,
unsigned long flags,
- struct clk_ops *clk_prcc_ops)
+ const struct clk_ops *clk_prcc_ops)
{
struct clk_prcc *clk;
struct clk_init_data clk_prcc_init;
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 7f343821f4e4..6e3e16b2e5ca 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -186,7 +186,7 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
clk->is_prepared = 0;
}
-static struct clk_ops clk_prcmu_scalable_ops = {
+static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.is_prepared = clk_prcmu_is_prepared,
@@ -198,7 +198,7 @@ static struct clk_ops clk_prcmu_scalable_ops = {
.set_rate = clk_prcmu_set_rate,
};
-static struct clk_ops clk_prcmu_gate_ops = {
+static const struct clk_ops clk_prcmu_gate_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.is_prepared = clk_prcmu_is_prepared,
@@ -208,19 +208,19 @@ static struct clk_ops clk_prcmu_gate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
};
-static struct clk_ops clk_prcmu_scalable_rate_ops = {
+static const struct clk_ops clk_prcmu_scalable_rate_ops = {
.is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
.round_rate = clk_prcmu_round_rate,
.set_rate = clk_prcmu_set_rate,
};
-static struct clk_ops clk_prcmu_rate_ops = {
+static const struct clk_ops clk_prcmu_rate_ops = {
.is_enabled = clk_prcmu_is_enabled,
.recalc_rate = clk_prcmu_recalc_rate,
};
-static struct clk_ops clk_prcmu_opp_gate_ops = {
+static const struct clk_ops clk_prcmu_opp_gate_ops = {
.prepare = clk_prcmu_opp_prepare,
.unprepare = clk_prcmu_opp_unprepare,
.is_prepared = clk_prcmu_is_prepared,
@@ -230,7 +230,7 @@ static struct clk_ops clk_prcmu_opp_gate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
};
-static struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
+static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.is_prepared = clk_prcmu_is_prepared,
@@ -247,7 +247,7 @@ static struct clk *clk_reg_prcmu(const char *name,
u8 cg_sel,
unsigned long rate,
unsigned long flags,
- struct clk_ops *clk_prcmu_ops)
+ const struct clk_ops *clk_prcmu_ops)
{
struct clk_prcmu *clk;
struct clk_init_data clk_prcmu_init;
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index 266ddea630d2..8a4e93ce1e42 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -98,18 +98,18 @@ static u8 clk_sysctrl_get_parent(struct clk_hw *hw)
return clk->parent_index;
}
-static struct clk_ops clk_sysctrl_gate_ops = {
+static const struct clk_ops clk_sysctrl_gate_ops = {
.prepare = clk_sysctrl_prepare,
.unprepare = clk_sysctrl_unprepare,
};
-static struct clk_ops clk_sysctrl_gate_fixed_rate_ops = {
+static const struct clk_ops clk_sysctrl_gate_fixed_rate_ops = {
.prepare = clk_sysctrl_prepare,
.unprepare = clk_sysctrl_unprepare,
.recalc_rate = clk_sysctrl_recalc_rate,
};
-static struct clk_ops clk_sysctrl_set_parent_ops = {
+static const struct clk_ops clk_sysctrl_set_parent_ops = {
.set_parent = clk_sysctrl_set_parent,
.get_parent = clk_sysctrl_get_parent,
};
@@ -124,7 +124,7 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
unsigned long rate,
unsigned long enable_delay_us,
unsigned long flags,
- struct clk_ops *clk_sysctrl_ops)
+ const struct clk_ops *clk_sysctrl_ops)
{
struct clk_sysctrl *clk;
struct clk_init_data clk_sysctrl_init;
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 7e5add7d7752..e7a868b83fe5 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -61,7 +61,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
return regmap_write(osc->reg, 0, rate);
}
-static struct clk_ops vexpress_osc_ops = {
+static const struct clk_ops vexpress_osc_ops = {
.recalc_rate = vexpress_osc_recalc_rate,
.round_rate = vexpress_osc_round_rate,
.set_rate = vexpress_osc_set_rate,
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
index 27f853d4c76b..354dd508c516 100644
--- a/drivers/clk/zte/clk-zx296718.c
+++ b/drivers/clk/zte/clk-zx296718.c
@@ -451,7 +451,7 @@ static struct zx_clk_fixed_factor top_ffactor_clk[] = {
FFACTOR(0, "emmc_mux_div2", "emmc_mux", 1, 2, CLK_SET_RATE_PARENT),
};
-static struct clk_div_table noc_div_table[] = {
+static const struct clk_div_table noc_div_table[] = {
{ .val = 1, .div = 2, },
{ .val = 3, .div = 4, },
};
@@ -644,7 +644,7 @@ static int __init top_clocks_init(struct device_node *np)
return 0;
}
-static struct clk_div_table common_even_div_table[] = {
+static const struct clk_div_table common_even_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 3, .div = 4, },
@@ -656,7 +656,7 @@ static struct clk_div_table common_even_div_table[] = {
{ .val = 15, .div = 16, },
};
-static struct clk_div_table common_div_table[] = {
+static const struct clk_div_table common_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 3, },
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fcae5ca6ac92..cc6062049170 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -262,7 +262,7 @@ config CLKSRC_LPC32XX
config CLKSRC_PISTACHIO
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
- depends on HAS_IOMEM
+ depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
@@ -598,6 +598,14 @@ config CLKSRC_IMX_GPT
depends on ARM && CLKDEV_LOOKUP
select CLKSRC_MMIO
+config CLKSRC_IMX_TPM
+ bool "Clocksource using i.MX TPM" if COMPILE_TEST
+ depends on ARM && CLKDEV_LOOKUP && GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enable this option to use the i.MX Timer/PWM Module (TPM) as a
+ clocksource.
+
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 6df949402dfc..dbc1ad14515e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
+obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aae87c4c546e..fd4b7f684bd0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -455,7 +455,11 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- static_branch_enable(&arch_timer_read_ool_enabled);
+ /*
+ * Use the locked version, as we're called from the CPU
+ * hotplug framework. Otherwise, we end up in deadlock-land.
+ */
+ static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
/*
* Don't use the vdso fastpath if errata require using the
@@ -1440,7 +1444,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
* While unlikely, it's theoretically possible that none of the frames
* in a timer expose the combination of feature we want.
*/
- for (i = i; i < timer_count; i++) {
+ for (i = 0; i < timer_count; i++) {
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 82828d3a4739..39e489a96ad7 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -114,7 +114,6 @@ static int __init bcm2835_timer_init(struct device_node *node)
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
- pr_err("Can't allocate timer struct\n");
ret = -ENOMEM;
goto err_iounmap;
}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index bc48cbf6a795..269db74a0658 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
+ return irq;
}
/* map memory, let base point to the STI instance */
@@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p);
+ if (ret) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- return -ENOENT;
+ return ret;
}
/* get hold of clock */
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 17b861ea2626..ae3167c28b12 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -10,25 +10,45 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irqchip/mips-gic.h>
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/time.h>
+#include <asm/mips-cps.h>
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
+static u64 notrace gic_read_count(void)
+{
+ unsigned int hi, hi2, lo;
+
+ if (mips_cm_is64)
+ return read_gic_counter();
+
+ do {
+ hi = read_gic_counter_32h();
+ lo = read_gic_counter_32l();
+ hi2 = read_gic_counter_32h();
+ } while (hi2 != hi);
+
+ return (((u64) hi) << 32) + lo;
+}
+
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
+ unsigned long flags;
u64 cnt;
int res;
cnt = gic_read_count();
cnt += (u64)delta;
- gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
+ local_irq_save(flags);
+ write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask)));
+ write_gic_vo_compare(cnt);
+ local_irq_restore(flags);
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
@@ -37,7 +57,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
- gic_write_compare(gic_read_compare());
+ write_gic_vl_compare(read_gic_vl_compare());
cd->event_handler(cd);
return IRQ_HANDLED;
}
@@ -139,10 +159,15 @@ static struct clocksource gic_clocksource = {
static int __init __gic_clocksource_init(void)
{
+ unsigned int count_width;
int ret;
/* Set clocksource mask. */
- gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
+ count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
+ count_width >>= __fls(GIC_CONFIG_COUNTBITS);
+ count_width *= 4;
+ count_width += 32;
+ gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
/* Calculate a somewhat reasonable rating value. */
gic_clocksource.rating = 200 + gic_frequency / 10000000;
@@ -159,7 +184,7 @@ static int __init gic_clocksource_of_init(struct device_node *node)
struct clk *clk;
int ret;
- if (!gic_present || !node->parent ||
+ if (!mips_gic_present() || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")) {
pr_warn("No DT definition for the mips gic driver\n");
return -ENXIO;
@@ -197,7 +222,7 @@ static int __init gic_clocksource_of_init(struct device_node *node)
}
/* And finally start the counter */
- gic_start_count();
+ clear_gic_config(GIC_CONFIG_COUNTSTOP);
return 0;
}
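
gic_read_count() above uses the standard tear-free technique for reading a 64-bit counter exposed as two 32-bit halves: read high, read low, re-read high, and retry if the high word changed because the low word wrapped in between. The same pattern in isolation, as a generic sketch:

/* Generic sketch of a tear-free 64-bit read from split 32-bit registers. */
static u64 read_split_counter(u32 (*rd_hi)(void), u32 (*rd_lo)(void))
{
	u32 hi, hi2, lo;

	do {
		hi  = rd_hi();
		lo  = rd_lo();
		hi2 = rd_hi();
	} while (hi2 != hi);	/* high word changed: low wrapped, retry */

	return ((u64)hi << 32) | lo;
}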
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c4e1c2e6046f..6a8d9838ce33 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -26,13 +26,13 @@ static int __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
- pr_err("%s: invalid address\n", np->full_name);
+ pr_err("%pOF: invalid address\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("%s: invalid clock\n", np->full_name);
+ pr_err("%pOF: invalid clock\n", np);
return PTR_ERR(clk);
}
@@ -43,7 +43,7 @@ static int __init tango_clocksource_init(struct device_node *np)
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
if (ret) {
- pr_err("%s: registration failed\n", np->full_name);
+ pr_err("%pOF: registration failed\n", np);
return ret;
}
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
new file mode 100644
index 000000000000..21bffdcb2f20
--- /dev/null
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#define TPM_SC 0x10
+#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
+#define TPM_SC_CMOD_DIV_DEFAULT 0x3
+#define TPM_CNT 0x14
+#define TPM_MOD 0x18
+#define TPM_STATUS 0x1c
+#define TPM_STATUS_CH0F BIT(0)
+#define TPM_C0SC 0x20
+#define TPM_C0SC_CHIE BIT(6)
+#define TPM_C0SC_MODE_SHIFT 2
+#define TPM_C0SC_MODE_MASK 0x3c
+#define TPM_C0SC_MODE_SW_COMPARE 0x4
+#define TPM_C0V 0x24
+
+static void __iomem *timer_base;
+static struct clock_event_device clockevent_tpm;
+
+static inline void tpm_timer_disable(void)
+{
+ unsigned int val;
+
+ /* channel disable */
+ val = readl(timer_base + TPM_C0SC);
+ val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE);
+ writel(val, timer_base + TPM_C0SC);
+}
+
+static inline void tpm_timer_enable(void)
+{
+ unsigned int val;
+
+ /* channel enabled in sw compare mode */
+ val = readl(timer_base + TPM_C0SC);
+ val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) |
+ TPM_C0SC_CHIE;
+ writel(val, timer_base + TPM_C0SC);
+}
+
+static inline void tpm_irq_acknowledge(void)
+{
+ writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
+}
+
+static struct delay_timer tpm_delay_timer;
+
+static inline unsigned long tpm_read_counter(void)
+{
+ return readl(timer_base + TPM_CNT);
+}
+
+static unsigned long tpm_read_current_timer(void)
+{
+ return tpm_read_counter();
+}
+
+static u64 notrace tpm_read_sched_clock(void)
+{
+ return tpm_read_counter();
+}
+
+static int __init tpm_clocksource_init(unsigned long rate)
+{
+ tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
+ tpm_delay_timer.freq = rate;
+ register_current_timer_delay(&tpm_delay_timer);
+
+ sched_clock_register(tpm_read_sched_clock, 32, rate);
+
+ return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
+ rate, 200, 32, clocksource_mmio_readl_up);
+}
+
+static int tpm_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long next, now;
+
+ next = tpm_read_counter();
+ next += delta;
+ writel(next, timer_base + TPM_C0V);
+ now = tpm_read_counter();
+
+ /*
+ * NOTE: We have observed, with very low probability, that bus fabric
+ * contention between the GPU and the A7 can delay the CNT register
+ * write by a few cycles, causing a min_delta event to be missed, so
+ * add an -ETIME check here to catch that case.
+ */
+ return (int)(next - now) <= 0 ? -ETIME : 0;
+}
+
+static int tpm_set_state_oneshot(struct clock_event_device *evt)
+{
+ tpm_timer_enable();
+
+ return 0;
+}
+
+static int tpm_set_state_shutdown(struct clock_event_device *evt)
+{
+ tpm_timer_disable();
+
+ return 0;
+}
+
+static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ tpm_irq_acknowledge();
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct clock_event_device clockevent_tpm = {
+ .name = "i.MX7ULP TPM Timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_oneshot = tpm_set_state_oneshot,
+ .set_next_event = tpm_set_next_event,
+ .set_state_shutdown = tpm_set_state_shutdown,
+ .rating = 200,
+};
+
+static int __init tpm_clockevent_init(unsigned long rate, int irq)
+{
+ int ret;
+
+ ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "i.MX7ULP TPM Timer", &clockevent_tpm);
+
+ clockevent_tpm.cpumask = cpumask_of(0);
+ clockevent_tpm.irq = irq;
+ clockevents_config_and_register(&clockevent_tpm,
+ rate, 300, 0xfffffffe);
+
+ return ret;
+}
+
+static int __init tpm_timer_init(struct device_node *np)
+{
+ struct clk *ipg, *per;
+ int irq, ret;
+ u32 rate;
+
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("tpm: failed to get base address\n");
+ return -ENXIO;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ pr_err("tpm: failed to get irq\n");
+ ret = -ENOENT;
+ goto err_iomap;
+ }
+
+ ipg = of_clk_get_by_name(np, "ipg");
+ per = of_clk_get_by_name(np, "per");
+ if (IS_ERR(ipg) || IS_ERR(per)) {
+ pr_err("tpm: failed to get igp or per clk\n");
+ ret = -ENODEV;
+ goto err_clk_get;
+ }
+
+ /* enable clk before accessing registers */
+ ret = clk_prepare_enable(ipg);
+ if (ret) {
+ pr_err("tpm: ipg clock enable failed (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(per);
+ if (ret) {
+ pr_err("tpm: per clock enable failed (%d)\n", ret);
+ goto err_per_clk_enable;
+ }
+
+ /*
+ * Initialize tpm module to a known state
+ * 1) Counter disabled
+ * 2) TPM counter operates in up counting mode
+ * 3) Timer Overflow Interrupt disabled
+ * 4) Channel0 disabled
+ * 5) DMA transfers disabled
+ */
+ writel(0, timer_base + TPM_SC);
+ writel(0, timer_base + TPM_CNT);
+ writel(0, timer_base + TPM_C0SC);
+
+ /* increase per cnt, div 8 by default */
+ writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
+ timer_base + TPM_SC);
+
+ /* set MOD register to maximum for free running mode */
+ writel(0xffffffff, timer_base + TPM_MOD);
+
+ rate = clk_get_rate(per) >> 3;
+ ret = tpm_clocksource_init(rate);
+ if (ret)
+ goto err_per_clk_enable;
+
+ ret = tpm_clockevent_init(rate, irq);
+ if (ret)
+ goto err_per_clk_enable;
+
+ return 0;
+
+err_per_clk_enable:
+ clk_disable_unprepare(ipg);
+err_clk_get:
+ clk_put(per);
+ clk_put(ipg);
+err_iomap:
+ iounmap(timer_base);
+ return ret;
+}
+TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
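
Two details of the new driver worth noting: the event rate is clk_get_rate(per) >> 3 because TPM_SC_CMOD_DIV_DEFAULT selects a divide-by-8 prescaler, and tpm_set_next_event() detects an already-expired deadline with wrap-safe signed arithmetic. The latter check in isolation, as a hedged sketch:

/* Sketch: -ETIME if 'next' is not strictly ahead of 'now' (wrap-safe). */
static int check_deadline(unsigned long next, unsigned long now)
{
	return (int)(next - now) <= 0 ? -ETIME : 0;
}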
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d509b500a7b5..c79122d8e10d 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -52,7 +52,7 @@ static __init int timer_irq_init(struct device_node *np,
of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
}
if (!of_irq->irq) {
- pr_err("Failed to map interrupt for %s\n", np->full_name);
+ pr_err("Failed to map interrupt for %pOF\n", np);
return -EINVAL;
}
@@ -63,8 +63,7 @@ static __init int timer_irq_init(struct device_node *np,
of_irq->flags ? of_irq->flags : IRQF_TIMER,
np->full_name, clkevt);
if (ret) {
- pr_err("Failed to request irq %d for %s\n", of_irq->irq,
- np->full_name);
+ pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
}
@@ -88,20 +87,20 @@ static __init int timer_clk_init(struct device_node *np,
of_clk->clk = of_clk->name ? of_clk_get_by_name(np, of_clk->name) :
of_clk_get(np, of_clk->index);
if (IS_ERR(of_clk->clk)) {
- pr_err("Failed to get clock for %s\n", np->full_name);
+ pr_err("Failed to get clock for %pOF\n", np);
return PTR_ERR(of_clk->clk);
}
ret = clk_prepare_enable(of_clk->clk);
if (ret) {
- pr_err("Failed for enable clock for %s\n", np->full_name);
+ pr_err("Failed for enable clock for %pOF\n", np);
goto out_clk_put;
}
of_clk->rate = clk_get_rate(of_clk->clk);
if (!of_clk->rate) {
ret = -EINVAL;
- pr_err("Failed to get clock rate for %s\n", np->full_name);
+ pr_err("Failed to get clock rate for %pOF\n", np);
goto out_clk_disable;
}
@@ -128,9 +127,9 @@ static __init int timer_base_init(struct device_node *np,
const char *name = of_base->name ? of_base->name : np->full_name;
of_base->base = of_io_request_and_map(np, of_base->index, name);
- if (!of_base->base) {
+ if (IS_ERR(of_base->base)) {
pr_err("Failed to iomap (%s)\n", name);
- return -ENXIO;
+ return PTR_ERR(of_base->base);
}
return 0;
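All of the conversions in this file (and in timer-probe.c, timer-stm32.c, pmac64-cpufreq.c and sti-cpufreq.c below) swap the open-coded np->full_name for the %pOF printk extension, which renders a struct device_node as its full device-tree path. A minimal kernel-context sketch of the idiom, not a complete module:

#include <linux/of.h>
#include <linux/printk.h>

static void report_timer_node(struct device_node *np)
{
	/* %pOF expands to the node's path, e.g. "/soc/timer@40002000" */
	pr_info("probing %pOF\n", np);
}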
diff --git a/drivers/clocksource/timer-probe.c b/drivers/clocksource/timer-probe.c
index da81e5de74fe..028075720334 100644
--- a/drivers/clocksource/timer-probe.c
+++ b/drivers/clocksource/timer-probe.c
@@ -40,8 +40,7 @@ void __init timer_probe(void)
ret = init_func_ret(np);
if (ret) {
- pr_err("Failed to initialize '%s': %d\n",
- of_node_full_name(np), ret);
+ pr_err("Failed to initialize '%pOF': %d\n", np, ret);
continue;
}
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 174d1243ea93..8f2423789ba9 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -138,7 +138,7 @@ static int __init stm32_clockevent_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
- pr_err("%s: failed to get irq.\n", np->full_name);
+ pr_err("%pOF: failed to get irq.\n", np);
goto err_get_irq;
}
@@ -168,12 +168,12 @@ static int __init stm32_clockevent_init(struct device_node *np)
ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
"stm32 clockevent", data);
if (ret) {
- pr_err("%s: failed to request irq.\n", np->full_name);
+ pr_err("%pOF: failed to request irq.\n", np);
goto err_get_irq;
}
- pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
- np->full_name, bits);
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ np, bits);
return ret;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 2011fec2d6ad..bdce4488ded1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,15 +71,6 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
-config ARM_DB8500_CPUFREQ
- tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500
- default ARCH_U8500
- depends on HAS_IOMEM
- depends on !CPU_THERMAL || THERMAL
- help
- This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC
- series.
-
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
@@ -96,14 +87,13 @@ config ARM_KIRKWOOD_CPUFREQ
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
-config ARM_MT8173_CPUFREQ
- tristate "Mediatek MT8173 CPUFreq support"
+config ARM_MEDIATEK_CPUFREQ
+ tristate "CPU Frequency scaling support for MediaTek SoCs"
depends on ARCH_MEDIATEK && REGULATOR
- depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
- This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+ This adds the CPUFreq driver support for MediaTek SoCs.
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
@@ -242,6 +232,11 @@ config ARM_STI_CPUFREQ
this config option if you wish to add CPUFreq support for STi based
SoCs.
+config ARM_TANGO_CPUFREQ
+ bool
+ depends on CPUFREQ_DT && ARCH_TANGO
+ default y
+
config ARM_TEGRA20_CPUFREQ
bool "Tegra20 CPUFreq support"
depends on ARCH_TEGRA
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ab3a42cd29ef..c7af9b2a255e 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,12 +53,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
-obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
-obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
@@ -75,6 +74,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
+obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index ea6d62547b10..17504129fd77 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -483,11 +483,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- if (arm_bL_ops->get_transition_latency)
- policy->cpuinfo.transition_latency =
- arm_bL_ops->get_transition_latency(cpu_dev);
- else
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency =
+ arm_bL_ops->get_transition_latency(cpu_dev);
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
@@ -622,7 +619,8 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
return -EBUSY;
}
- if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
+ if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
+ !ops->get_transition_latency) {
pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
return -ENODEV;
}
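With the hunk above, get_transition_latency() stops being optional: bL_cpufreq_register() now rejects an ops structure that lacks it, and the CPUFREQ_ETERNAL fallback is gone. A hedged sketch of the minimum a client must now supply (the names and the 500 us figure are invented for illustration; the signatures are assumed to follow this tree's arm_big_little.h):

#include <linux/cpumask.h>
#include <linux/device.h>
#include "arm_big_little.h"

static int my_init_opp_table(const struct cpumask *cpumask)
{
	return 0;	/* populate the OPP table for these CPUs */
}

static int my_get_transition_latency(struct device *cpu_dev)
{
	return 500000;	/* ns; now mandatory, no CPUFREQ_ETERNAL default */
}

static struct cpufreq_arm_bL_ops my_bL_ops = {
	.name			= "my-bL",
	.init_opp_table		= my_init_opp_table,
	.get_transition_latency	= my_get_transition_latency,
};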
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 10be285c9055..a1c3025f9df7 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -172,7 +172,6 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -EFAULT;
}
- cpumask_set_cpu(policy->cpu, policy->cpus);
cpu->cur_policy = policy;
/* Set policy->cur to max now. The governors will adjust later. */
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 1c262923fe58..a020da7940d6 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -9,11 +9,16 @@
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"
-static const struct of_device_id machines[] __initconst = {
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "allwinner,sun4i-a10", },
{ .compatible = "allwinner,sun5i-a10s", },
{ .compatible = "allwinner,sun5i-a13", },
@@ -22,7 +27,6 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun6i-a31s", },
{ .compatible = "allwinner,sun7i-a20", },
{ .compatible = "allwinner,sun8i-a23", },
- { .compatible = "allwinner,sun8i-a33", },
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
@@ -32,7 +36,6 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "arm,integrator-cp", },
{ .compatible = "hisilicon,hi3660", },
- { .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
{ .compatible = "fsl,imx51", },
@@ -46,11 +49,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
{ .compatible = "samsung,exynos4212", },
- { .compatible = "samsung,exynos4412", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
- { .compatible = "samsung,exynos5420", },
- { .compatible = "samsung,exynos5433", },
{ .compatible = "samsung,exynos5800", },
#endif
@@ -67,6 +67,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "renesas,r8a7792", },
{ .compatible = "renesas,r8a7793", },
{ .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,r8a7795", },
+ { .compatible = "renesas,r8a7796", },
{ .compatible = "renesas,sh73a0", },
{ .compatible = "rockchip,rk2928", },
@@ -76,17 +78,17 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "rockchip,rk3188", },
{ .compatible = "rockchip,rk3228", },
{ .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
{ .compatible = "rockchip,rk3366", },
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
- { .compatible = "sigma,tango4" },
-
- { .compatible = "socionext,uniphier-pro5", },
- { .compatible = "socionext,uniphier-pxs2", },
{ .compatible = "socionext,uniphier-ld6b", },
- { .compatible = "socionext,uniphier-ld11", },
- { .compatible = "socionext,uniphier-ld20", },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap3", },
@@ -94,27 +96,56 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "ti,omap5", },
{ .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
- { .compatible = "zte,zx296718", },
+ { }
+};
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blacklist[] __initconst = {
{ }
};
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
const struct of_device_id *match;
+ const void *data = NULL;
if (!np)
return -ENODEV;
- match = of_match_node(machines, np);
+ match = of_match_node(whitelist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ goto create_pdev;
+
of_node_put(np);
- if (!match)
- return -ENODEV;
+ return -ENODEV;
+create_pdev:
+ of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
- -1, match->data,
+ -1, data,
sizeof(struct cpufreq_dt_platform_data)));
}
device_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index fef3c2160691..d83ab94d041a 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
return 0;
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 5503d491b016..dbf82f36d270 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -357,7 +357,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
}
@@ -369,6 +368,7 @@ static int nforce2_cpu_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = nforce2_verify,
.target = nforce2_target,
.get = nforce2_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9bf97a366029..ea43b147a7fe 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -524,6 +524,32 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
+{
+ unsigned int latency;
+
+ if (policy->transition_delay_us)
+ return policy->transition_delay_us;
+
+ latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (latency) {
+ /*
+ * For platforms that can change the frequency very fast (< 10
+ * us), the above formula gives a decent transition delay. But
+ * for platforms where transition_latency is in milliseconds, it
+ * ends up giving unrealistic values.
+ *
+ * Cap the default transition delay to 10 ms, which seems to be
+ * a reasonable amount of time after which we should reevaluate
+ * the frequency.
+ */
+ return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
+ }
+
+ return LATENCY_MULTIPLIER;
+}
+EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
+
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
@@ -1817,9 +1843,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* twice in parallel for the same policy and that it will never be called in
* parallel with either ->target() or ->target_index() for the same policy.
*
- * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
- * callback to indicate an error condition, the hardware configuration must be
- * preserved.
+ * Returns the actual frequency set for the CPU.
+ *
+ * If 0 is returned by the driver's ->fast_switch() callback to indicate an
+ * error condition, the hardware configuration must be preserved.
*/
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
@@ -1988,13 +2015,13 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
if (!policy->governor)
return -EINVAL;
- if (policy->governor->max_transition_latency &&
- policy->cpuinfo.transition_latency >
- policy->governor->max_transition_latency) {
+ /* Platform doesn't want dynamic frequency switching? */
+ if (policy->governor->dynamic_switching &&
+ cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
struct cpufreq_governor *gov = cpufreq_fallback_governor();
if (gov) {
- pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
policy->governor->name, gov->name);
policy->governor = gov;
} else {
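The capping logic in the new cpufreq_policy_transition_delay_us() is easy to check in isolation. Below is a standalone userspace re-implementation; LATENCY_MULTIPLIER is 1000 in include/linux/cpufreq.h, and the sample latencies are arbitrary:

#include <stdio.h>

#define LATENCY_MULTIPLIER	1000	/* as in include/linux/cpufreq.h */
#define NSEC_PER_USEC		1000

static unsigned int transition_delay_us(unsigned int latency_ns)
{
	unsigned int latency = latency_ns / NSEC_PER_USEC;

	if (latency) {
		unsigned int d = latency * LATENCY_MULTIPLIER;
		return d < 10000 ? d : 10000;	/* cap at 10 ms */
	}
	return LATENCY_MULTIPLIER;
}

int main(void)
{
	printf("%u\n", transition_delay_us(1000));	/* 1 us HW -> 1000 us */
	printf("%u\n", transition_delay_us(1000000));	/* 1 ms HW -> capped at 10000 us */
	return 0;
}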
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 88220ff3e1c2..f20f20a77d4d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -246,7 +246,6 @@ gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
-gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);
@@ -254,12 +253,10 @@ gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
-gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);
static struct attribute *cs_attributes[] = {
- &min_sampling_rate.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
@@ -297,10 +294,7 @@ static int cs_init(struct dbs_data *dbs_data)
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
-
dbs_data->tuners = tuners;
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 47e24b5384b3..58d4f4e1ad6a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -47,14 +47,11 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
- unsigned int rate;
int ret;
- ret = sscanf(buf, "%u", &rate);
+ ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
if (ret != 1)
return -EINVAL;
- dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);
-
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
@@ -275,6 +272,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
u64 delta_ns, lst;
+ if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
+ return;
+
/*
* The work may not be allowed to be queued up right now.
* Possible reasons:
@@ -392,7 +392,6 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data;
struct policy_dbs_info *policy_dbs;
- unsigned int latency;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -431,16 +430,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (ret)
goto free_policy_dbs_info;
- /* policy latency is in ns. Convert it to us first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
-
- /* Bring kernel and HW constraints together */
- dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
- LATENCY_MULTIPLIER * latency);
+ dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 0236ec2cd654..8463f5def0f5 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -41,7 +41,6 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
struct dbs_data {
struct gov_attr_set attr_set;
void *tuners;
- unsigned int min_sampling_rate;
unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
@@ -160,7 +159,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
{ \
.name = _name_, \
- .max_transition_latency = TRANSITION_LATENCY_LIMIT, \
+ .dynamic_switching = true, \
.owner = THIS_MODULE, \
.init = cpufreq_dbs_governor_init, \
.exit = cpufreq_dbs_governor_exit, \
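Taken together with the cpufreq.c hunk above, this replaces the old latency-threshold check with an explicit capability handshake: dbs governors declare .dynamic_switching = true, and drivers whose hardware cannot tolerate frequent switching set CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING instead of advertising CPUFREQ_ETERNAL, as the many driver hunks below do. A condensed, hypothetical driver-side sketch:

#include <linux/cpufreq.h>

/* Hypothetical driver that must never run under ondemand/conservative. */
static struct cpufreq_driver slow_hw_driver = {
	.name	= "slow-hw",
	.flags	= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
	/* .verify, .target_index, .init, ... as for any table-based driver */
};

With the flag set, cpufreq_init_governor() refuses any governor that has ->dynamic_switching and falls back to cpufreq_fallback_governor(), typically performance.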
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3937acf7e026..6b423eebfd5d 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -319,7 +319,6 @@ gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
-gov_show_one_common(min_sampling_rate);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);
@@ -329,10 +328,8 @@ gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
-gov_attr_ro(min_sampling_rate);
static struct attribute *od_attributes[] = {
- &min_sampling_rate.attr,
&sampling_rate.attr,
&up_threshold.attr,
&sampling_down_factor.attr,
@@ -373,17 +370,8 @@ static int od_init(struct dbs_data *dbs_data)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- /*
- * In nohz/micro accounting case we set the minimum frequency
- * not depending on HZ, but fixed (very low).
- */
- dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-
- /* For correct statistics, we need 10 ticks for each measure */
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
}
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
deleted file mode 100644
index 4ee0431579c1..000000000000
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010-2012
- *
- * License Terms: GNU General Public License v2
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-static struct cpufreq_frequency_table *freq_table;
-static struct clk *armss_clk;
-static struct thermal_cooling_device *cdev;
-
-static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- /* update armss clk frequency */
- return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
-}
-
-static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = armss_clk;
- return cpufreq_generic_init(policy, freq_table, 20 * 1000);
-}
-
-static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
-{
- if (!IS_ERR(cdev))
- cpufreq_cooling_unregister(cdev);
- return 0;
-}
-
-static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
-{
- cdev = cpufreq_cooling_register(policy);
- if (IS_ERR(cdev))
- pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
- else
- pr_info("Cooling device registered: %s\n", cdev->type);
-}
-
-static struct cpufreq_driver dbx500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = dbx500_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = dbx500_cpufreq_init,
- .exit = dbx500_cpufreq_exit,
- .ready = dbx500_cpufreq_ready,
- .name = "DBX500",
- .attr = cpufreq_generic_attr,
-};
-
-static int dbx500_cpufreq_probe(struct platform_device *pdev)
-{
- struct cpufreq_frequency_table *pos;
-
- freq_table = dev_get_platdata(&pdev->dev);
- if (!freq_table) {
- pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
- return -ENODEV;
- }
-
- armss_clk = clk_get(&pdev->dev, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("dbx500-cpufreq: Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("dbx500-cpufreq: Available frequencies:\n");
- cpufreq_for_each_entry(pos, freq_table)
- pr_info(" %d Mhz\n", pos->frequency / 1000);
-
- return cpufreq_register_driver(&dbx500_cpufreq_driver);
-}
-
-static struct platform_driver dbx500_cpufreq_plat_driver = {
- .driver = {
- .name = "cpufreq-ux500",
- },
- .probe = dbx500_cpufreq_probe,
-};
-
-static int __init dbx500_cpufreq_register(void)
-{
- return platform_driver_register(&dbx500_cpufreq_plat_driver);
-}
-device_initcall(dbx500_cpufreq_register);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DBX500");
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index bfce11cba1df..45e2ca62515e 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -165,9 +165,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
if (pos->frequency > max_freq)
pos->frequency = CPUFREQ_ENTRY_INVALID;
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
@@ -196,6 +193,7 @@ __setup("elanfreq=", elanfreq_setup);
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 3488c9c175eb..8f52a06664e3 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -428,7 +428,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
policy->max = maxfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
}
@@ -438,6 +437,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
* MediaGX/Geode GX initialize cpufreq driver
*/
static struct cpufreq_driver gx_suspmod_driver = {
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.get = gx_get_cpuspeed,
.verify = cpufreq_gx_verify,
.target = cpufreq_gx_target,
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b6edd3ccaa55..14466a9b01c0 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -47,6 +47,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
unsigned int old_freq, new_freq;
+ bool pll1_sys_temp_enabled = false;
int ret;
new_freq = freq_table[index].frequency;
@@ -124,6 +125,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ } else {
+ /* pll1_sys needs to be enabled for divider rate change to work. */
+ pll1_sys_temp_enabled = true;
+ clk_prepare_enable(pll1_sys_clk);
}
}
@@ -135,6 +140,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return ret;
}
+ /* PLL1 is only needed until after ARM-PODF is set. */
+ if (pll1_sys_temp_enabled)
+ clk_disable_unprepare(pll1_sys_clk);
+
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0566455f233e..93a0e88bef76 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,8 +37,7 @@
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
-#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
-#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC)
+#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
#define INTEL_CPUFREQ_TRANSITION_DELAY 500
@@ -173,28 +172,6 @@ struct vid_data {
};
/**
- * struct _pid - Stores PID data
- * @setpoint: Target set point for busyness or performance
- * @integral: Storage for accumulated error values
- * @p_gain: PID proportional gain
- * @i_gain: PID integral gain
- * @d_gain: PID derivative gain
- * @deadband: PID deadband
- * @last_err: Last error storage for integral part of PID calculation
- *
- * Stores PID coefficients and last error for PID controller.
- */
-struct _pid {
- int setpoint;
- int32_t integral;
- int32_t p_gain;
- int32_t i_gain;
- int32_t d_gain;
- int deadband;
- int32_t last_err;
-};
-
-/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
 * @turbo_disabled: Whether or not turbo P-states are available at all,
@@ -223,7 +200,6 @@ struct global_params {
* @last_update: Time of the last update.
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
- * @pid: Stores PID parameters for this CPU
* @last_sample_time: Last Sample time
 * @aperf_mperf_shift: Number of clock cycles after aperf, mperf is incremented
* This shift is a multiplier to mperf delta to
@@ -258,7 +234,6 @@ struct cpudata {
struct pstate_data pstate;
struct vid_data vid;
- struct _pid pid;
u64 last_update;
u64 last_sample_time;
@@ -284,28 +259,6 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pstate_adjust_policy - Stores static PID configuration data
- * @sample_rate_ms: PID calculation sample rate in ms
- * @sample_rate_ns: Sample rate calculation in ns
- * @deadband: PID deadband
- * @setpoint: PID Setpoint
- * @p_gain_pct: PID proportional gain
- * @i_gain_pct: PID integral gain
- * @d_gain_pct: PID derivative gain
- *
- * Stores per CPU model static PID configuration data.
- */
-struct pstate_adjust_policy {
- int sample_rate_ms;
- s64 sample_rate_ns;
- int deadband;
- int setpoint;
- int p_gain_pct;
- int d_gain_pct;
- int i_gain_pct;
-};
-
-/**
* struct pstate_funcs - Per CPU model specific callbacks
* @get_max: Callback to get maximum non turbo effective P state
* @get_max_physical: Callback to get maximum non turbo physical P state
@@ -314,7 +267,6 @@ struct pstate_adjust_policy {
* @get_scaling: Callback to get frequency scaling factor
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
- * @update_util: Active mode utilization update callback.
*
 * Core and Atom CPU models have different ways to get P State limits. This
* structure is used to store those callbacks.
@@ -328,20 +280,9 @@ struct pstate_funcs {
int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
- void (*update_util)(struct update_util_data *data, u64 time,
- unsigned int flags);
};
static struct pstate_funcs pstate_funcs __read_mostly;
-static struct pstate_adjust_policy pid_params __read_mostly = {
- .sample_rate_ms = 10,
- .sample_rate_ns = 10 * NSEC_PER_MSEC,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
-};
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
@@ -509,56 +450,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
}
#endif
-static signed int pid_calc(struct _pid *pid, int32_t busy)
-{
- signed int result;
- int32_t pterm, dterm, fp_error;
- int32_t integral_limit;
-
- fp_error = pid->setpoint - busy;
-
- if (abs(fp_error) <= pid->deadband)
- return 0;
-
- pterm = mul_fp(pid->p_gain, fp_error);
-
- pid->integral += fp_error;
-
- /*
- * We limit the integral here so that it will never
- * get higher than 30. This prevents it from becoming
- * too large an input over long periods of time and allows
- * it to get factored out sooner.
- *
- * The value of 30 was chosen through experimentation.
- */
- integral_limit = int_tofp(30);
- if (pid->integral > integral_limit)
- pid->integral = integral_limit;
- if (pid->integral < -integral_limit)
- pid->integral = -integral_limit;
-
- dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
- pid->last_err = fp_error;
-
- result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
- result = result + (1 << (FRAC_BITS-1));
- return (signed int)fp_toint(result);
-}
-
-static inline void intel_pstate_pid_reset(struct cpudata *cpu)
-{
- struct _pid *pid = &cpu->pid;
-
- pid->p_gain = percent_fp(pid_params.p_gain_pct);
- pid->d_gain = percent_fp(pid_params.d_gain_pct);
- pid->i_gain = percent_fp(pid_params.i_gain_pct);
- pid->setpoint = int_tofp(pid_params.setpoint);
- pid->last_err = pid->setpoint - int_tofp(100);
- pid->deadband = int_tofp(pid_params.deadband);
- pid->integral = 0;
-}
-
static inline void update_turbo_state(void)
{
u64 misc_en;
@@ -911,82 +802,6 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-/************************** debugfs begin ************************/
-static int pid_param_set(void *data, u64 val)
-{
- unsigned int cpu;
-
- *(u32 *)data = val;
- pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
- for_each_possible_cpu(cpu)
- if (all_cpu_data[cpu])
- intel_pstate_pid_reset(all_cpu_data[cpu]);
-
- return 0;
-}
-
-static int pid_param_get(void *data, u64 *val)
-{
- *val = *(u32 *)data;
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
-
-static struct dentry *debugfs_parent;
-
-struct pid_param {
- char *name;
- void *value;
- struct dentry *dentry;
-};
-
-static struct pid_param pid_files[] = {
- {"sample_rate_ms", &pid_params.sample_rate_ms, },
- {"d_gain_pct", &pid_params.d_gain_pct, },
- {"i_gain_pct", &pid_params.i_gain_pct, },
- {"deadband", &pid_params.deadband, },
- {"setpoint", &pid_params.setpoint, },
- {"p_gain_pct", &pid_params.p_gain_pct, },
- {NULL, NULL, }
-};
-
-static void intel_pstate_debug_expose_params(void)
-{
- int i;
-
- debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
- if (IS_ERR_OR_NULL(debugfs_parent))
- return;
-
- for (i = 0; pid_files[i].name; i++) {
- struct dentry *dentry;
-
- dentry = debugfs_create_file(pid_files[i].name, 0660,
- debugfs_parent, pid_files[i].value,
- &fops_pid_param);
- if (!IS_ERR(dentry))
- pid_files[i].dentry = dentry;
- }
-}
-
-static void intel_pstate_debug_hide_params(void)
-{
- int i;
-
- if (IS_ERR_OR_NULL(debugfs_parent))
- return;
-
- for (i = 0; pid_files[i].name; i++) {
- debugfs_remove(pid_files[i].dentry);
- pid_files[i].dentry = NULL;
- }
-
- debugfs_remove(debugfs_parent);
- debugfs_parent = NULL;
-}
-
-/************************** debugfs end ************************/
-
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1613,8 +1428,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return mul_ext_fp(cpu->sample.core_avg_perf,
- cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+ return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}
static inline int32_t get_avg_pstate(struct cpudata *cpu)
@@ -1623,7 +1437,7 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
cpu->sample.core_avg_perf);
}
-static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
+static inline int32_t get_target_pstate(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
@@ -1661,44 +1475,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
return target;
}
-static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
-{
- int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
- u64 duration_ns;
-
- /*
- * perf_scaled is the ratio of the average P-state during the last
- * sampling period to the P-state requested last time (in percent).
- *
- * That measures the system's response to the previous P-state
- * selection.
- */
- max_pstate = cpu->pstate.max_pstate_physical;
- current_pstate = cpu->pstate.current_pstate;
- perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
- div_fp(100 * max_pstate, current_pstate));
-
- /*
- * Since our utilization update callback will not run unless we are
- * in C0, check if the actual elapsed time is significantly greater (3x)
- * than our sample interval. If it is, then we were idle for a long
- * enough period of time to adjust our performance metric.
- */
- duration_ns = cpu->sample.time - cpu->last_sample_time;
- if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
- sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
- perf_scaled = mul_fp(perf_scaled, sample_ratio);
- } else {
- sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
- cpu->sample.tsc);
- if (sample_ratio < int_tofp(1))
- perf_scaled = 0;
- }
-
- cpu->sample.busy_scaled = perf_scaled;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
-}
-
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
int max_pstate = intel_pstate_get_base_pstate(cpu);
@@ -1718,13 +1494,15 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
-static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
+static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
int from = cpu->pstate.current_pstate;
struct sample *sample;
+ int target_pstate;
update_turbo_state();
+ target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1741,31 +1519,27 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_pstate_update_util_pid(struct update_util_data *data,
- u64 time, unsigned int flags)
-{
- struct cpudata *cpu = container_of(data, struct cpudata, update_util);
- u64 delta_ns = time - cpu->sample.time;
-
- if ((s64)delta_ns < pid_params.sample_rate_ns)
- return;
-
- if (intel_pstate_sample(cpu, time)) {
- int target_pstate;
-
- target_pstate = get_target_pstate_use_performance(cpu);
- intel_pstate_adjust_pstate(cpu, target_pstate);
- }
-}
-
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
+ /* Don't allow remote callbacks */
+ if (smp_processor_id() != cpu->cpu)
+ return;
+
if (flags & SCHED_CPUFREQ_IOWAIT) {
cpu->iowait_boost = int_tofp(1);
+ cpu->last_update = time;
+ /*
+ * The last sample showed 100% busy, so the P-state was already
+ * at its max; avoid the overhead of recomputing it.
+ */
+ if (fp_toint(cpu->sample.busy_scaled) == 100)
+ return;
+
+ goto set_pstate;
} else if (cpu->iowait_boost) {
/* Clear iowait_boost if the CPU may have been idle. */
delta_ns = time - cpu->last_update;
@@ -1774,15 +1548,12 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
}
cpu->last_update = time;
delta_ns = time - cpu->sample.time;
- if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
+ if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
return;
- if (intel_pstate_sample(cpu, time)) {
- int target_pstate;
-
- target_pstate = get_target_pstate_use_cpu_load(cpu);
- intel_pstate_adjust_pstate(cpu, target_pstate);
- }
+set_pstate:
+ if (intel_pstate_sample(cpu, time))
+ intel_pstate_adjust_pstate(cpu);
}
static struct pstate_funcs core_funcs = {
@@ -1792,7 +1563,6 @@ static struct pstate_funcs core_funcs = {
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util_pid,
};
static const struct pstate_funcs silvermont_funcs = {
@@ -1803,7 +1573,6 @@ static const struct pstate_funcs silvermont_funcs = {
.get_val = atom_get_val,
.get_scaling = silvermont_get_scaling,
.get_vid = atom_get_vid,
- .update_util = intel_pstate_update_util,
};
static const struct pstate_funcs airmont_funcs = {
@@ -1814,7 +1583,6 @@ static const struct pstate_funcs airmont_funcs = {
.get_val = atom_get_val,
.get_scaling = airmont_get_scaling,
.get_vid = atom_get_vid,
- .update_util = intel_pstate_update_util,
};
static const struct pstate_funcs knl_funcs = {
@@ -1825,7 +1593,6 @@ static const struct pstate_funcs knl_funcs = {
.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util_pid,
};
static const struct pstate_funcs bxt_funcs = {
@@ -1835,7 +1602,6 @@ static const struct pstate_funcs bxt_funcs = {
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util,
};
#define ICPU(model, policy) \
@@ -1879,8 +1645,6 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
{}
};
-static bool pid_in_use(void);
-
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1911,8 +1675,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_disable_ee(cpunum);
intel_pstate_hwp_enable(cpu);
- } else if (pid_in_use()) {
- intel_pstate_pid_reset(cpu);
}
intel_pstate_get_cpu_pstates(cpu);
@@ -1935,7 +1697,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
- pstate_funcs.update_util);
+ intel_pstate_update_util);
cpu->update_util_set = true;
}
@@ -2133,7 +1895,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
intel_pstate_init_acpi_perf_limits(policy);
- cpumask_set_cpu(policy->cpu, policy->cpus);
policy->fast_switch_possible = true;
@@ -2147,7 +1908,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
@@ -2262,12 +2022,6 @@ static struct cpufreq_driver intel_cpufreq = {
static struct cpufreq_driver *default_driver = &intel_pstate;
-static bool pid_in_use(void)
-{
- return intel_pstate_driver == &intel_pstate &&
- pstate_funcs.update_util == intel_pstate_update_util_pid;
-}
-
static void intel_pstate_driver_cleanup(void)
{
unsigned int cpu;
@@ -2302,9 +2056,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- if (pid_in_use())
- intel_pstate_debug_expose_params();
-
return 0;
}
@@ -2313,9 +2064,6 @@ static int intel_pstate_unregister_driver(void)
if (hwp_active)
return -EBUSY;
- if (pid_in_use())
- intel_pstate_debug_hide_params();
-
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
@@ -2383,24 +2131,6 @@ static int __init intel_pstate_msrs_not_valid(void)
return 0;
}
-#ifdef CONFIG_ACPI
-static void intel_pstate_use_acpi_profile(void)
-{
- switch (acpi_gbl_FADT.preferred_profile) {
- case PM_MOBILE:
- case PM_TABLET:
- case PM_APPLIANCE_PC:
- case PM_DESKTOP:
- case PM_WORKSTATION:
- pstate_funcs.update_util = intel_pstate_update_util;
- }
-}
-#else
-static void intel_pstate_use_acpi_profile(void)
-{
-}
-#endif
-
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
@@ -2410,10 +2140,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_scaling = funcs->get_scaling;
pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
- pstate_funcs.update_util = funcs->update_util;
pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
-
- intel_pstate_use_acpi_profile();
}
#ifdef CONFIG_ACPI
@@ -2467,39 +2194,31 @@ enum {
PPC,
};
-struct hw_vendor_info {
- u16 valid;
- char oem_id[ACPI_OEM_ID_SIZE];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
- int oem_pwr_table;
-};
-
/* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] __initdata = {
- {1, "HP ", "ProLiant", PSS},
- {1, "ORACLE", "X4-2 ", PPC},
- {1, "ORACLE", "X4-2L ", PPC},
- {1, "ORACLE", "X4-2B ", PPC},
- {1, "ORACLE", "X3-2 ", PPC},
- {1, "ORACLE", "X3-2L ", PPC},
- {1, "ORACLE", "X3-2B ", PPC},
- {1, "ORACLE", "X4470M2 ", PPC},
- {1, "ORACLE", "X4270M3 ", PPC},
- {1, "ORACLE", "X4270M2 ", PPC},
- {1, "ORACLE", "X4170M2 ", PPC},
- {1, "ORACLE", "X4170 M3", PPC},
- {1, "ORACLE", "X4275 M3", PPC},
- {1, "ORACLE", "X6-2 ", PPC},
- {1, "ORACLE", "Sudbury ", PPC},
- {0, "", ""},
+static struct acpi_platform_list plat_info[] __initdata = {
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ { } /* End */
};
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
- struct acpi_table_header hdr;
- struct hw_vendor_info *v_info;
const struct x86_cpu_id *id;
u64 misc_pwr;
+ int idx;
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
@@ -2508,21 +2227,15 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
return true;
}
- if (acpi_disabled ||
- ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ idx = acpi_match_platform_list(plat_info);
+ if (idx < 0)
return false;
- for (v_info = vendor_info; v_info->valid; v_info++) {
- if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
- !strncmp(hdr.oem_table_id, v_info->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE))
- switch (v_info->oem_pwr_table) {
- case PSS:
- return intel_pstate_no_acpi_pss();
- case PPC:
- return intel_pstate_has_acpi_ppc() &&
- (!force_load);
- }
+ switch (plat_info[idx].data) {
+ case PSS:
+ return intel_pstate_no_acpi_pss();
+ case PPC:
+ return intel_pstate_has_acpi_ppc() && !force_load;
}
return false;
@@ -2557,9 +2270,7 @@ static int __init intel_pstate_init(void)
if (x86_match_cpu(hwp_support_ids)) {
copy_cpu_funcs(&core_funcs);
- if (no_hwp) {
- pstate_funcs.update_util = intel_pstate_update_util;
- } else {
+ if (!no_hwp) {
hwp_active++;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
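The reworked get_avg_frequency() above scales core_avg_perf (an APERF/MPERF ratio kept in intel_pstate's extended fixed-point format) by cpu_khz instead of max_pstate_physical * scaling. A standalone userspace sketch of that arithmetic; FRAC_BITS = 8 and EXT_BITS = 6 mirror the driver's definitions, and the APERF/MPERF/cpu_khz values are made up:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS	8
#define EXT_BITS	6
#define EXT_FRAC_BITS	(EXT_BITS + FRAC_BITS)	/* 14 fractional bits */

static int64_t div_ext_fp(uint64_t x, uint64_t y)
{
	return (x << EXT_FRAC_BITS) / y;	/* div64_u64() in the kernel */
}

static int64_t mul_ext_fp(int64_t x, int64_t y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

int main(void)
{
	/* Assume APERF advanced at 80% of the MPERF (TSC-rate) clock. */
	uint64_t aperf = 800000, mperf = 1000000, cpu_khz = 2400000;
	int64_t core_avg_perf = div_ext_fp(aperf, mperf);

	printf("avg freq: %lld kHz\n",
	       (long long)mul_ext_fp(core_avg_perf, cpu_khz));
	return 0;
}

On these inputs the result is about 1920000 kHz, i.e. 80% of the 2.4 GHz base clock, which the removed max_pstate_physical-based formula only approximated indirectly.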
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 074971b12635..542aa9adba1a 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -270,7 +270,6 @@ static int longrun_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = longrun_low_freq;
policy->cpuinfo.max_freq = longrun_high_freq;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
longrun_get_policy(policy);
return 0;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 9ac27b22476c..da344696beed 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -114,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static struct platform_device_id platform_device_ids[] = {
+static const struct platform_device_id platform_device_ids[] = {
{
.name = "loongson2_cpufreq",
},
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f9f00fb4bc3a..18c4bd9a5c65 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -507,7 +507,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct cpufreq_driver mt8173_cpufreq_driver = {
+static struct cpufreq_driver mtk_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = cpufreq_generic_frequency_table_verify,
@@ -520,7 +520,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static int mt8173_cpufreq_probe(struct platform_device *pdev)
+static int mtk_cpufreq_probe(struct platform_device *pdev)
{
struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
@@ -547,7 +547,7 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
list_add(&info->list_head, &dvfs_info_list);
}
- ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
+ ret = cpufreq_register_driver(&mtk_cpufreq_driver);
if (ret) {
dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
goto release_dvfs_info_list;
@@ -564,15 +564,18 @@ release_dvfs_info_list:
return ret;
}
-static struct platform_driver mt8173_cpufreq_platdrv = {
+static struct platform_driver mtk_cpufreq_platdrv = {
.driver = {
- .name = "mt8173-cpufreq",
+ .name = "mtk-cpufreq",
},
- .probe = mt8173_cpufreq_probe,
+ .probe = mtk_cpufreq_probe,
};
/* List of machines supported by this driver */
-static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
+static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
@@ -580,7 +583,7 @@ static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
{ }
};
-static int __init mt8173_cpufreq_driver_init(void)
+static int __init mtk_cpufreq_driver_init(void)
{
struct device_node *np;
const struct of_device_id *match;
@@ -591,14 +594,14 @@ static int __init mt8173_cpufreq_driver_init(void)
if (!np)
return -ENODEV;
- match = of_match_node(mt8173_cpufreq_machines, np);
+ match = of_match_node(mtk_cpufreq_machines, np);
of_node_put(np);
if (!match) {
- pr_warn("Machine is not compatible with mt8173-cpufreq\n");
+ pr_warn("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
- err = platform_driver_register(&mt8173_cpufreq_platdrv);
+ err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
return err;
@@ -608,7 +611,7 @@ static int __init mt8173_cpufreq_driver_init(void)
* and the device registration codes are put here to handle defer
* probing.
*/
- pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
+ pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
return PTR_ERR(pdev);
@@ -616,4 +619,4 @@ static int __init mt8173_cpufreq_driver_init(void)
return 0;
}
-device_initcall(mt8173_cpufreq_driver_init);
+device_initcall(mtk_cpufreq_driver_init);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ff44016ea031..61ae06ca008e 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -442,7 +442,8 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
- .flags = CPUFREQ_PM_NO_WARN,
+ .flags = CPUFREQ_PM_NO_WARN |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
@@ -626,14 +627,16 @@ static int __init pmac_cpufreq_setup(void)
if (!value)
goto out;
cur_freq = (*value) / 1000;
- transition_latency = CPUFREQ_ETERNAL;
/* Check for 7447A based MacRISC3 */
if (of_machine_is_compatible("MacRISC3") &&
of_get_property(cpunode, "dynamic-power-step", NULL) &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
+
+ /* Allow dynamic switching */
transition_latency = 8000000;
+ pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
/* Check for other MacRISC3 machines */
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5") ||
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 267e0894c62d..be623dd7b9f2 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -516,7 +516,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
goto bail;
}
- DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
+ DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock);
/* Now get all the platform functions */
pfunc_cpu_getfreq =
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 9f013ed42977..80ac313e6c59 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -578,7 +578,7 @@ static int acer_cpufreq_pst(const struct dmi_system_id *d)
* A BIOS update is all that can save them.
* Mention this, and disable cpufreq.
*/
-static struct dmi_system_id powernow_dmi_table[] = {
+static const struct dmi_system_id powernow_dmi_table[] = {
{
.callback = acer_cpufreq_pst,
.ident = "Acer Aspire",
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index f82074eea779..5d31c2db12a3 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -602,6 +602,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
}
clk_base = of_iomap(np, 0);
+ of_node_put(np);
if (!clk_base) {
pr_err("%s: failed to map clock registers\n", __func__);
return -EFAULT;
@@ -612,6 +613,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
pr_err("%s: failed to get alias of dmc node '%s'\n",
__func__, np->name);
+ of_node_put(np);
return id;
}
@@ -619,6 +621,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
if (!dmc_base[id]) {
pr_err("%s: failed to map dmc%d registers\n",
__func__, id);
+ of_node_put(np);
return -EFAULT;
}
}
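The of_node_put() additions above follow the usual device-tree refcount rule: every successful node lookup takes a reference that must be dropped on all paths once the node itself is no longer needed, even though mappings made from it stay valid. A hedged kernel-context sketch of the pattern:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_clock_block(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);
	of_node_put(np);	/* the mapping remains valid after the put */
	return base;
}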
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 728eab77e8e0..e2d8a77c36d5 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -197,11 +197,12 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
- return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+ return cpufreq_generic_init(policy, sa11x0_freq_table, 0);
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 2bac9b6cfeea..66e5fb088ecc 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -306,13 +306,14 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
- return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+ return cpufreq_generic_init(policy, sa11x0_freq_table, 0);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 719c3d9f07fb..28893d435cf5 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -137,8 +137,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
(clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
}
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
"Maximum %u.%03u MHz.\n",
policy->min / 1000, policy->min % 1000,
@@ -159,6 +157,7 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.get = sh_cpufreq_get,
.target = sh_cpufreq_target,
.verify = sh_cpufreq_verify,
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index b86953a3ddc4..0412a246a785 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -207,7 +207,7 @@ static unsigned int speedstep_detect_chipset(void)
* 8100 which use a pretty old revision of the 82815
* host bridge. Abort on these systems.
*/
- static struct pci_dev *hostbridge;
+ struct pci_dev *hostbridge;
hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82815_MC,
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 1b8062182c81..ccab452a4ef5 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -35,7 +35,7 @@ static int relaxed_check;
static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
{
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
- struct {
+ static const struct {
unsigned int ratio; /* Frequency Multiplier (x10) */
u8 bitmap; /* power on configuration bits
[27, 25:22] (in MSR 0x2a) */
@@ -58,7 +58,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
};
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
- struct {
+ static const struct {
unsigned int value; /* Front Side Bus speed in MHz */
u8 bitmap; /* power on configuration bits [18: 19]
(in MSR 0x2a) */
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 37b30071c220..d23f24ccff38 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -266,7 +266,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
@@ -290,6 +289,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index d2d0430d09d4..47105735df12 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -65,8 +65,8 @@ static int sti_cpufreq_fetch_major(void) {
ret = of_property_read_u32_index(np, "st,syscfg",
MAJOR_ID_INDEX, &major_offset);
if (ret) {
- dev_err(dev, "No major number offset provided in %s [%d]\n",
- np->full_name, ret);
+ dev_err(dev, "No major number offset provided in %pOF [%d]\n",
+ np, ret);
return ret;
}
@@ -92,8 +92,8 @@ static int sti_cpufreq_fetch_minor(void)
MINOR_ID_INDEX, &minor_offset);
if (ret) {
dev_err(dev,
- "No minor number offset provided %s [%d]\n",
- np->full_name, ret);
+ "No minor number offset provided %pOF [%d]\n",
+ np, ret);
return ret;
}
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c
new file mode 100644
index 000000000000..89a7f860bfe8
--- /dev/null
+++ b/drivers/cpufreq/tango-cpufreq.c
@@ -0,0 +1,38 @@
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+ { .compatible = "sigma,tango4" },
+ { /* sentinel */ }
+};
+
+static int __init tango_cpufreq_init(void)
+{
+ struct device *cpu_dev = get_cpu_device(0);
+ unsigned long max_freq;
+ struct clk *cpu_clk;
+ void *res;
+
+ if (!of_match_node(machines, of_root))
+ return -ENODEV;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk))
+ return -ENODEV;
+
+ max_freq = clk_get_rate(cpu_clk);
+
+ dev_pm_opp_add(cpu_dev, max_freq / 1, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 2, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 3, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 5, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 9, 0);
+
+ res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(res);
+}
+device_initcall(tango_cpufreq_init);
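
For illustration, with a hypothetical 999 MHz maximum clock the five dev_pm_opp_add() calls above (frequency in Hz, 0 for the voltage) register operating points at 999, 499.5, 333, 199.8 and 111 MHz; the cpufreq-dt platform device registered on the last line then picks these up as its frequency table.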
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index a7b5658c0460..b29cd3398463 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -245,8 +245,6 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- of_node_put(opp_data->opp_node);
-
ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
version, VERSION_COUNT));
if (ret) {
@@ -255,6 +253,8 @@ static int ti_cpufreq_init(void)
goto fail_put_node;
}
+ of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 6f9dfa80563a..db62d9844751 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -58,13 +58,12 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->clk = clk_get(NULL, "MAIN_CLK");
return PTR_ERR_OR_ZERO(policy->clk);
}
static struct cpufreq_driver ucv2_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = ucv2_verify_speed,
.target = ucv2_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 3ba81b1dffad..0b67a05a7aae 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -5,6 +5,7 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
+obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
##################################################################################
# ARM SoC drivers
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 71e586d7df71..147f38ea0fcd 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,13 +119,13 @@ struct cpuidle_coupled {
#define CPUIDLE_COUPLED_NOT_IDLE (-1)
-static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);
/*
* The cpuidle_coupled_poke_pending mask is used to avoid calling
- * __smp_call_function_single with the per cpu call_single_data struct already
+ * __smp_call_function_single with the per cpu call_single_data_t struct already
* in use. This prevents a deadlock where two cpus are waiting for each others
- * call_single_data struct to be available
+ * call_single_data_t struct to be available
*/
static cpumask_t cpuidle_coupled_poke_pending;
@@ -339,7 +339,7 @@ static void cpuidle_coupled_handle_poke(void *info)
*/
static void cpuidle_coupled_poke(int cpu)
{
- struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+ call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
smp_call_function_single_async(cpu, csd);
@@ -651,7 +651,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
int cpu;
struct cpuidle_device *other_dev;
- struct call_single_data *csd;
+ call_single_data_t *csd;
struct cpuidle_coupled *coupled;
if (cpumask_empty(&dev->coupled_cpus))
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 12b9145913de..72b5e47286b4 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -37,7 +37,7 @@ static int cps_nc_enter(struct cpuidle_device *dev,
* TODO: don't treat core 0 specially, just prevent the final core
* TODO: remap interrupt affinity temporarily
*/
- if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
+ if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
index = STATE_NC_WAIT;
/* Select the appropriate cps_pm_state */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 60bb64f4329d..484cc8909d5c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
unsigned int max_latency,
unsigned int forbidden_flags,
- bool freeze)
+ bool s2idle)
{
unsigned int latency_req = 0;
int i, ret = 0;
@@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
if (s->disabled || su->disable || s->exit_latency <= latency_req
|| s->exit_latency > max_latency
|| (s->flags & forbidden_flags)
- || (freeze && !s->enter_freeze))
+ || (s2idle && !s->enter_s2idle))
continue;
latency_req = s->exit_latency;
@@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_freeze_proper(struct cpuidle_driver *drv,
+static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
/*
@@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- drv->states[index].enter_freeze(dev, drv, index);
+ drv->states[index].enter_s2idle(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
@@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
}
/**
- * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
*
- * If there are states with the ->enter_freeze callback, find the deepest of
+ * If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
-int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
int index;
/*
- * Find the deepest state with ->enter_freeze present, which guarantees
+ * Find the deepest state with ->enter_s2idle present, which guarantees
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index > 0)
- enter_freeze_proper(drv, dev, index);
+ enter_s2idle_proper(drv, dev, index);
return index;
}
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index e53fb861beb0..dc32f34e68d9 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -179,36 +179,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
}
}
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int __cpuidle poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- local_irq_enable();
- if (!current_set_polling_and_test()) {
- while (!need_resched())
- cpu_relax();
- }
- current_clr_polling();
-
- return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
- struct cpuidle_state *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->enter = poll_idle;
- state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* __cpuidle_register_driver: register the driver
* @drv: a valid pointer to a struct cpuidle_driver
@@ -246,8 +216,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
(void *)1, 1);
- poll_idle_init(drv);
-
return 0;
}
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ae8eb0359889..53342b7f1010 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state,
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits allowing the tick to be frozen
- * safely. So enter() can be also enter_freeze() callback.
+ * safely. So enter() can be also enter_s2idle() callback.
*/
- idle_state->enter_freeze = match_id->data;
+ idle_state->enter_s2idle = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
@@ -53,16 +53,16 @@ static int init_state_node(struct cpuidle_state *idle_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
- pr_debug(" * %s missing entry-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
- pr_debug(" * %s missing exit-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
return -EINVAL;
}
/*
@@ -75,8 +75,8 @@ static int init_state_node(struct cpuidle_state *idle_state,
err = of_property_read_u32(state_node, "min-residency-us",
&idle_state->target_residency);
if (err) {
- pr_debug(" * %s missing min-residency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing min-residency-us property\n",
+ state_node);
return -EINVAL;
}
@@ -186,8 +186,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
}
if (!idle_state_valid(state_node, i, cpumask)) {
- pr_warn("%s idle state not valid, bailing out\n",
- state_node->full_name);
+ pr_warn("%pOF idle state not valid, bailing out\n",
+ state_node);
err = -EINVAL;
break;
}
@@ -200,8 +200,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
idle_state = &drv->states[state_idx++];
err = init_state_node(idle_state, matches, state_node);
if (err) {
- pr_err("Parsing idle state node %s failed with err %d\n",
- state_node->full_name, err);
+ pr_err("Parsing idle state node %pOF failed with err %d\n",
+ state_node, err);
err = -EINVAL;
break;
}
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ac321f09e717..ce1a2ffffb2a 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -69,6 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
@@ -96,13 +97,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
}
/* consider demotion */
- if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ if (last_idx > first_idx &&
(drv->states[last_idx].disabled ||
dev->states_usage[last_idx].disable ||
drv->states[last_idx].exit_latency > latency_req)) {
int i;
- for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+ for (i = last_idx - 1; i > first_idx; i--) {
if (drv->states[i].exit_latency <= latency_req)
break;
}
@@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
return i;
}
- if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ if (last_idx > first_idx &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
@@ -133,13 +134,14 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ ldev->last_state_idx = first_idx;
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+ for (i = first_idx; i < drv->state_count; i++) {
state = &drv->states[i];
lstate = &ldev->states[i];
@@ -151,7 +153,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
- if (i > CPUIDLE_DRIVER_STATE_START)
+ if (i > first_idx)
lstate->threshold.demotion_time = state->exit_latency;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 61b64c2b2cb8..48eaf2879228 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -324,8 +324,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
expected_interval = get_typical_interval(data);
expected_interval = min(expected_interval, data->next_timer_us);
- if (CPUIDLE_DRIVER_STATE_START > 0) {
- struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+ first_idx = 0;
+ if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
+ struct cpuidle_state *s = &drv->states[1];
unsigned int polling_threshold;
/*
@@ -336,12 +337,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
polling_threshold = max_t(unsigned int, 20, s->target_residency);
if (data->next_timer_us > polling_threshold &&
latency_req > s->exit_latency && !s->disabled &&
- !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
- first_idx = CPUIDLE_DRIVER_STATE_START;
- else
- first_idx = CPUIDLE_DRIVER_STATE_START - 1;
- } else {
- first_idx = 0;
+ !dev->states_usage[1].disable)
+ first_idx = 1;
}
/*
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
new file mode 100644
index 000000000000..7416b16287de
--- /dev/null
+++ b/drivers/cpuidle/poll_state.c
@@ -0,0 +1,37 @@
+/*
+ * poll_state.c - Polling idle state
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/sched.h>
+#include <linux/sched/idle.h>
+
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ local_irq_enable();
+ if (!current_set_polling_and_test()) {
+ while (!need_resched())
+ cpu_relax();
+ }
+ current_clr_polling();
+
+ return index;
+}
+
+void cpuidle_poll_state_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_state *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->enter = poll_idle;
+ state->disabled = false;
+ state->flags = CPUIDLE_FLAG_POLLING;
+}
+EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
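
A minimal usage sketch (hypothetical driver with illustrative names, assuming the declarations from <linux/cpuidle.h>): an arch driver installs the shared polling state at index 0 and its real idle states above it. Note how the menu and ladder hunks above now detect that state at runtime via CPUIDLE_FLAG_POLLING instead of the compile-time CPUIDLE_DRIVER_STATE_START constant.

static struct cpuidle_driver my_idle_driver = {
	.name	= "my-idle",
	.owner	= THIS_MODULE,
};

static int __init my_idle_init(void)
{
	cpuidle_poll_state_init(&my_idle_driver);	/* state 0 = POLL */
	/* fill my_idle_driver.states[1..n] with real idle states here */
	my_idle_driver.state_count = 1;
	return cpuidle_register(&my_idle_driver, NULL);
}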
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4b75084fabad..fe33c199fc1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -525,12 +525,26 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_ATMEL_ECC
+ tristate "Support for Microchip / Atmel ECC hw accelerator"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on I2C
+ select CRYPTO_ECDH
+ select CRC16
+ help
+	  Microchip / Atmel ECC hw accelerator.
+	  Select this if you want to use the Microchip / Atmel module for
+	  the ECDH algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-ecc.
+
config CRYPTO_DEV_CCP
- bool "Support for AMD Cryptographic Coprocessor"
+ bool "Support for AMD Secure Processor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
help
- The AMD Cryptographic Coprocessor provides hardware offload support
- for encryption, hashing and related operations.
+ The AMD Secure Processor provides support for the Cryptographic Coprocessor
+ (CCP) and the Platform Security Processor (PSP) devices.
if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
@@ -616,6 +630,14 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -686,4 +708,25 @@ config CRYPTO_DEV_SAFEXCEL
chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
algorithms.
+config CRYPTO_DEV_ARTPEC6
+ tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
+ depends on ARM && (ARCH_ARTPEC || COMPILE_TEST)
+ depends on HAS_DMA
+ depends on OF
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_CTR
+ select CRYPTO_HASH
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA384
+ select CRYPTO_SHA512
+ help
+ Enables the driver for the on-chip crypto accelerator
+ of Axis ARTPEC SoCs.
+
+ To compile this driver as a module, choose M here.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2c555a3393b2..808432b44c6b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
@@ -35,7 +36,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
+obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
@@ -43,3 +44,4 @@ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
new file mode 100644
index 000000000000..e66f18a0ddd0
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.c
@@ -0,0 +1,781 @@
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/kpp.h>
+#include "atmel-ecc.h"
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+ struct list_head i2c_client_list;
+ spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+static struct atmel_ecc_driver_data driver_data;
+
+/**
+ * atmel_ecc_i2c_client_priv - i2c_client private data
+ * @client : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock : lock for sending i2c commands
+ * @wake_token : wake token array of zeros
+ * @wake_token_sz : size in bytes of the wake_token
+ * @tfm_count : number of active crypto transformations on i2c client
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_ecc_i2c_client_priv {
+ struct i2c_client *client;
+ struct list_head i2c_client_list_node;
+ struct mutex lock;
+ u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+ size_t wake_token_sz;
+ atomic_t tfm_count ____cacheline_aligned;
+};
+
+/**
+ * atmel_ecdh_ctx - transformation context
+ * @client : pointer to i2c client device
+ * @fallback : used for unsupported curves or when the user wants to use
+ *             their own private key.
+ * @public_key : generated when calling set_secret(). It's the responsibility
+ * of the user to not call set_secret() while
+ * generate_public_key() or compute_shared_secret() are in flight.
+ * @curve_id : elliptic curve id
+ * @n_sz : size in bytes of the n prime
+ * @do_fallback: true when the device doesn't support the curve or when the
+ *              user wants to use their own private key.
+ */
+struct atmel_ecdh_ctx {
+ struct i2c_client *client;
+ struct crypto_kpp *fallback;
+ const u8 *public_key;
+ unsigned int curve_id;
+ size_t n_sz;
+ bool do_fallback;
+};
+
+/**
+ * atmel_ecc_work_data - data structure representing the work
+ * @ctx : transformation context.
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ * request. This has the form:
+ * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
+ * where:
+ * @work_data: data structure representing the work
+ * @areq : optional pointer to an argument passed with the original
+ * request.
+ * @status : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_ecc_work_data {
+ struct atmel_ecdh_ctx *ctx;
+ void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
+ int status);
+ void *areq;
+ struct work_struct work;
+ struct atmel_ecc_cmd cmd;
+};
+
+static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
+{
+ return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
+}
+
+/**
+ * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * The CRC16 is computed over the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
+{
+ u8 *data = &cmd->count;
+ size_t len = cmd->count - CRC_SIZE;
+ u16 *crc16 = (u16 *)(data + len);
+
+ *crc16 = atmel_ecc_crc16(0, data, len);
+}
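
For reference, bitrev16(crc16(...)) above reproduces the CRC described in the ATECC data sheets: lib/crc16.c implements the bit-reflected variant of polynomial 0x8005, so bit-reversing its result yields the MSB-first CRC the device expects. A standalone sketch of the direct computation, for illustration only (not part of the driver):

static u16 atecc_crc16_msb_first(const u8 *data, size_t len)
{
	u16 crc = 0;
	size_t i;
	u8 mask;

	for (i = 0; i < len; i++) {
		/* input bits enter LSB-first, register shifts MSB-first */
		for (mask = 0x01; mask; mask <<= 1) {
			u8 din = !!(data[i] & mask);
			u8 msb = crc >> 15;

			crc <<= 1;
			if (din != msb)
				crc ^= 0x8005;
		}
	}
	return crc;	/* equals bitrev16(crc16(0, data, len)) */
}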
+
+static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read the word from Configuration zone that contains the lock bytes
+ * (UserExtra, Selector, LockValue, LockConfig).
+ */
+ cmd->param1 = CONFIG_ZONE;
+ cmd->param2 = DEVICE_LOCK_ADDR;
+ cmd->count = READ_COUNT;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+}
+
+static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
+{
+ cmd->word_addr = COMMAND;
+ cmd->count = GENKEY_COUNT;
+ cmd->opcode = OPCODE_GENKEY;
+ cmd->param1 = GENKEY_MODE_PRIVATE;
+ /* a random private key will be generated and stored in slot keyID */
+ cmd->param2 = cpu_to_le16(keyid);
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_GENKEY;
+ cmd->rxsize = GENKEY_RSP_SIZE;
+}
+
+static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
+ struct scatterlist *pubkey)
+{
+ size_t copied;
+
+ cmd->word_addr = COMMAND;
+ cmd->count = ECDH_COUNT;
+ cmd->opcode = OPCODE_ECDH;
+ cmd->param1 = ECDH_PREFIX_MODE;
+ /* private key slot */
+ cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_ECDH;
+ cmd->rxsize = ECDH_RSP_SIZE;
+
+ return 0;
+}
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_ecc_status(struct device *dev, u8 *status)
+{
+ size_t err_list_len = ARRAY_SIZE(error_list);
+ int i;
+ u8 err_id = status[1];
+
+ if (*status != STATUS_SIZE)
+ return 0;
+
+ if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+ return 0;
+
+ for (i = 0; i < err_list_len; i++)
+ if (error_list[i].value == err_id)
+ break;
+
+ /* if err_id is not in the error_list then ignore it */
+ if (i != err_list_len) {
+ dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+ return err_id;
+ }
+
+ return 0;
+}
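
For example, a successful wake returns a four-byte group: the count byte 0x04 (STATUS_SIZE), the status byte 0x11 (STATUS_WAKE_SUCCESSFUL) and a two-byte CRC. atmel_ecc_status() treats 0x11 and 0x00 (STATUS_NOERR) as success and reports only the codes listed in error_list; anything else is ignored.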
+
+static int atmel_ecc_wakeup(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ u8 status[STATUS_RSP_SIZE];
+ int ret;
+
+ /*
+ * The device ignores any levels or transitions on the SCL pin when the
+ * device is idle, asleep or during waking up. Don't check for error
+ * when waking up the device.
+ */
+ i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+ /*
+	 * Wait for the device to wake up. Typical execution times for ecdh
+	 * and genkey are around tens of milliseconds; the usleep_range() delta
+	 * is chosen as 50 microseconds.
+ */
+ usleep_range(TWHI_MIN, TWHI_MAX);
+
+ ret = i2c_master_recv(client, status, STATUS_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return atmel_ecc_status(&client->dev, status);
+}
+
+static int atmel_ecc_sleep(struct i2c_client *client)
+{
+ u8 sleep = SLEEP_TOKEN;
+
+ return i2c_master_send(client, &sleep, 1);
+}
+
+static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+ int status)
+{
+ struct kpp_request *req = areq;
+ struct atmel_ecdh_ctx *ctx = work_data->ctx;
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ size_t copied;
+ size_t n_sz = ctx->n_sz;
+
+ if (status)
+ goto free_work_data;
+
+ /* copy the shared secret */
+ copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
+ n_sz);
+ if (copied != n_sz)
+ status = -EINVAL;
+
+ /* fall through */
+free_work_data:
+ kzfree(work_data);
+ kpp_request_complete(req, status);
+}
+
+/*
+ * atmel_ecc_send_receive() - send a command to the device and receive its
+ * response.
+ * @client: i2c client device
+ * @cmd : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer execution, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+static int atmel_ecc_send_receive(struct i2c_client *client,
+ struct atmel_ecc_cmd *cmd)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&i2c_priv->lock);
+
+ ret = atmel_ecc_wakeup(client);
+ if (ret)
+ goto err;
+
+ /* send the command */
+ ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+ if (ret < 0)
+ goto err;
+
+ /* delay the appropriate amount of time for command to execute */
+ msleep(cmd->msecs);
+
+ /* receive the response */
+ ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+ if (ret < 0)
+ goto err;
+
+ /* put the device into low-power mode */
+ ret = atmel_ecc_sleep(client);
+ if (ret < 0)
+ goto err;
+
+ mutex_unlock(&i2c_priv->lock);
+ return atmel_ecc_status(&client->dev, cmd->data);
+err:
+ mutex_unlock(&i2c_priv->lock);
+ return ret;
+}
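
Worked timing example for a GenKey command under the sequence above: the wake token, the ~1.5 ms wake-high delay (TWHI_MIN..TWHI_MAX), the command write, a 115 ms msleep() (MAX_EXEC_TIME_GENKEY), the 67-byte response read (GENKEY_RSP_SIZE = 64-byte public key plus count and CRC) and the one-byte sleep token.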
+
+static void atmel_ecc_work_handler(struct work_struct *work)
+{
+ struct atmel_ecc_work_data *work_data =
+ container_of(work, struct atmel_ecc_work_data, work);
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ struct i2c_client *client = work_data->ctx->client;
+ int status;
+
+ status = atmel_ecc_send_receive(client, cmd);
+ work_data->cbk(work_data, work_data->areq, status);
+}
+
+static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
+ void (*cbk)(struct atmel_ecc_work_data *work_data,
+ void *areq, int status),
+ void *areq)
+{
+ work_data->cbk = (void *)cbk;
+ work_data->areq = areq;
+
+ INIT_WORK(&work_data->work, atmel_ecc_work_handler);
+ schedule_work(&work_data->work);
+}
+
+static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
+{
+ if (curve_id == ECC_CURVE_NIST_P256)
+ return ATMEL_ECC_NIST_P256_N_SIZE;
+
+ return 0;
+}
+
+/*
+ * A random private key is generated and stored in the device. The device
+ * returns the pair public key.
+ */
+static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_cmd *cmd;
+ void *public_key;
+ struct ecdh params;
+ int ret = -ENOMEM;
+
+ /* free the old public key, if any */
+ kfree(ctx->public_key);
+ /* make sure you don't free the old public key twice */
+ ctx->public_key = NULL;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n");
+ return -EINVAL;
+ }
+
+ ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
+ if (!ctx->n_sz || params.key_size) {
+ /* fallback to ecdh software implementation */
+ ctx->do_fallback = true;
+ return crypto_kpp_set_secret(ctx->fallback, buf, len);
+ }
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL);
+ if (!public_key)
+ goto free_cmd;
+
+ ctx->do_fallback = false;
+ ctx->curve_id = params.curve_id;
+
+ atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+
+ ret = atmel_ecc_send_receive(ctx->client, cmd);
+ if (ret)
+ goto free_public_key;
+
+ /* save the public key */
+ memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE);
+ ctx->public_key = public_key;
+
+ kfree(cmd);
+ return 0;
+
+free_public_key:
+ kfree(public_key);
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ size_t copied;
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_generate_public_key(req);
+ }
+
+ /* public key was saved at private key generation */
+ copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
+ ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_work_data *work_data;
+ gfp_t gfp;
+ int ret;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_compute_shared_secret(req);
+ }
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ work_data = kmalloc(sizeof(*work_data), gfp);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->ctx = ctx;
+
+ ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+ if (ret)
+ goto free_work_data;
+
+ atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+
+ return -EINPROGRESS;
+
+free_work_data:
+ kfree(work_data);
+ return ret;
+}
+
+static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+ struct i2c_client *client = ERR_PTR(-ENODEV);
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.i2c_list_lock);
+
+ if (list_empty(&driver_data.i2c_client_list)) {
+ spin_unlock(&driver_data.i2c_list_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(i2c_priv, &driver_data.i2c_client_list,
+ i2c_client_list_node) {
+ tfm_cnt = atomic_read(&i2c_priv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_i2c_priv = i2c_priv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_i2c_priv) {
+ atomic_inc(&min_i2c_priv->tfm_count);
+ client = min_i2c_priv->client;
+ }
+
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return client;
+}
+
+static void atmel_ecc_i2c_client_free(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+ const char *alg = kpp_alg_name(tfm);
+ struct crypto_kpp *fallback;
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->client = atmel_ecc_i2c_client_alloc();
+ if (IS_ERR(ctx->client)) {
+ pr_err("tfm - i2c_client binding failed\n");
+ return PTR_ERR(ctx->client);
+ }
+
+ fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
+
+ dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
+
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ kfree(ctx->public_key);
+ crypto_free_kpp(ctx->fallback);
+ atmel_ecc_i2c_client_free(ctx->client);
+}
+
+static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->fallback);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ return ATMEL_ECC_PUBKEY_SIZE;
+}
+
+static struct kpp_alg atmel_ecdh = {
+ .set_secret = atmel_ecdh_set_secret,
+ .generate_public_key = atmel_ecdh_generate_public_key,
+ .compute_shared_secret = atmel_ecdh_compute_shared_secret,
+ .init = atmel_ecdh_init_tfm,
+ .exit = atmel_ecdh_exit_tfm,
+ .max_size = atmel_ecdh_max_size,
+ .base = {
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_name = "ecdh",
+ .cra_driver_name = "atmel-ecdh",
+ .cra_priority = ATMEL_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct atmel_ecdh_ctx),
+ },
+};
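
Consumer view, as a hedged sketch (hypothetical caller): requesting the generic "ecdh" algorithm through the kpp API selects this driver (cra_priority 300) over the software implementation whenever an ATECC508A is present, and the CRYPTO_ALG_NEED_FALLBACK flag lets unsupported curves or caller-supplied private keys fall back to that software implementation.

	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh", 0, 0);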
+
+static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
+{
+ u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+ /* return the size of the wake_token in bytes */
+ return DIV_ROUND_UP(no_of_bits, 8);
+}
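
Worked example: the wake token is TWLO_USEC (60 us) of SDA held low, produced by clocking out zero bytes at the bus rate. At a hypothetical 100 kHz bus, 60 us spans 60 * 100000 / 1000000 = 6 bit times, so DIV_ROUND_UP(6, 8) = 1 zero byte; at the 1 MHz maximum enforced in probe, 60 bit times round up to 8 bytes, which is where WAKE_TOKEN_MAX_SIZE = 8 comes from.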
+
+static int device_sanity_check(struct i2c_client *client)
+{
+ struct atmel_ecc_cmd *cmd;
+ int ret;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ atmel_ecc_init_read_cmd(cmd);
+
+ ret = atmel_ecc_send_receive(client, cmd);
+ if (ret)
+ goto free_cmd;
+
+ /*
+ * It is vital that the Configuration, Data and OTP zones be locked
+ * prior to release into the field of the system containing the device.
+ * Failure to lock these zones may permit modification of any secret
+ * keys and may lead to other security problems.
+ */
+ if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+ dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+ ret = -ENOTSUPP;
+ }
+
+ /* fall through */
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecc_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv;
+ struct device *dev = &client->dev;
+ int ret;
+ u32 bus_clk_rate;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C_FUNC_I2C not supported\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(client->adapter->dev.of_node,
+ "clock-frequency", &bus_clk_rate);
+ if (ret) {
+ dev_err(dev, "of: failed to read clock-frequency property\n");
+ return ret;
+ }
+
+ if (bus_clk_rate > 1000000L) {
+		dev_err(dev, "%u exceeds maximum supported clock frequency (1 MHz)\n",
+ bus_clk_rate);
+ return -EINVAL;
+ }
+
+ i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+ if (!i2c_priv)
+ return -ENOMEM;
+
+ i2c_priv->client = client;
+ mutex_init(&i2c_priv->lock);
+
+ /*
+	 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate of
+	 * 1 MHz. The bus_clk_rate check above ensures that wake_token_sz will
+	 * always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+ */
+ i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
+
+ memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+ atomic_set(&i2c_priv->tfm_count, 0);
+
+ i2c_set_clientdata(client, i2c_priv);
+
+ ret = device_sanity_check(client);
+ if (ret)
+ return ret;
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_add_tail(&i2c_priv->i2c_client_list_node,
+ &driver_data.i2c_client_list);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ ret = crypto_register_kpp(&atmel_ecdh);
+ if (ret) {
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ dev_err(dev, "%s alg registration failed\n",
+ atmel_ecdh.base.cra_driver_name);
+ } else {
+ dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+ }
+
+ return ret;
+}
+
+static int atmel_ecc_remove(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+	/* Refuse removal with -EBUSY while the i2c client is bound to a tfm. */
+ if (atomic_read(&i2c_priv->tfm_count)) {
+ dev_err(&client->dev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ crypto_unregister_kpp(&atmel_ecdh);
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ecc_dt_ids[] = {
+ {
+ .compatible = "atmel,atecc508a",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
+#endif
+
+static const struct i2c_device_id atmel_ecc_id[] = {
+ { "atecc508a", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
+
+static struct i2c_driver atmel_ecc_driver = {
+ .driver = {
+ .name = "atmel-ecc",
+ .of_match_table = of_match_ptr(atmel_ecc_dt_ids),
+ },
+ .probe = atmel_ecc_probe,
+ .remove = atmel_ecc_remove,
+ .id_table = atmel_ecc_id,
+};
+
+static int __init atmel_ecc_init(void)
+{
+ spin_lock_init(&driver_data.i2c_list_lock);
+ INIT_LIST_HEAD(&driver_data.i2c_client_list);
+ return i2c_add_driver(&atmel_ecc_driver);
+}
+
+static void __exit atmel_ecc_exit(void)
+{
+ flush_scheduled_work();
+ i2c_del_driver(&atmel_ecc_driver);
+}
+
+module_init(atmel_ecc_init);
+module_exit(atmel_ecc_exit);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
new file mode 100644
index 000000000000..25232c8abcc2
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ATMEL_ECC_H__
+#define __ATMEL_ECC_H__
+
+#define ATMEL_ECC_PRIORITY 300
+
+#define COMMAND 0x03 /* packet function */
+#define SLEEP_TOKEN 0x01
+#define WAKE_TOKEN_MAX_SIZE 8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE 1
+#define COUNT_SIZE 1
+#define CRC_SIZE 2
+#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE 32
+#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE 4
+#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
+ CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE GENKEY_RSP_SIZE
+
+/**
+ * atmel_ecc_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ * byte should have a value of COMMAND for normal operation.
+ * @count : number of bytes to be transferred to (or from) the device.
+ * @opcode : the command code.
+ * @param1 : the first parameter; always present.
+ * @param2 : the second parameter; always present.
+ * @data : optional remaining input data. Includes a 2-byte CRC.
+ * @rxsize : size of the data received from i2c client.
+ * @msecs : command execution time in milliseconds
+ */
+struct atmel_ecc_cmd {
+ u8 word_addr;
+ u8 count;
+ u8 opcode;
+ u8 param1;
+ u16 param2;
+ u8 data[MAX_RSP_SIZE];
+ u8 msecs;
+ u16 rxsize;
+} __packed;
+
+/* Status/Error codes */
+#define STATUS_SIZE 0x04
+#define STATUS_NOERR 0x00
+#define STATUS_WAKE_SUCCESSFUL 0x11
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} error_list[] = {
+ { 0x01, "CheckMac or Verify miscompare" },
+ { 0x03, "Parse Error" },
+ { 0x05, "ECC Fault" },
+ { 0x0F, "Execution Error" },
+ { 0xEE, "Watchdog about to expire" },
+ { 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE 0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX 1 /* buffer index of data in response */
+#define DATA_SLOT_2 2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR 0x15
+#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN 1500
+#define TWHI_MAX 1550
+
+/* Wake Low duration */
+#define TWLO_USEC 60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH 58
+#define MAX_EXEC_TIME_GENKEY 115
+#define MAX_EXEC_TIME_READ 1
+
+/* Command opcode */
+#define OPCODE_ECDH 0x43
+#define OPCODE_GENKEY 0x40
+#define OPCODE_READ 0x02
+
+/* Definitions for the READ Command */
+#define READ_COUNT 7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT 7
+#define GENKEY_MODE_PRIVATE 0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT 71
+#define ECDH_PREFIX_MODE 0x00
+
+#endif /* __ATMEL_ECC_H__ */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index dad4e5bad827..3e2f41b3eaf3 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2883,7 +2883,7 @@ sha_dd_err:
static int atmel_sha_remove(struct platform_device *pdev)
{
- static struct atmel_sha_dev *sha_dd;
+ struct atmel_sha_dev *sha_dd;
sha_dd = platform_get_drvdata(pdev);
if (!sha_dd)
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index b25f1b3c981f..f4b335dda568 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1487,7 +1487,7 @@ tdes_dd_err:
static int atmel_tdes_remove(struct platform_device *pdev)
{
- static struct atmel_tdes_dev *tdes_dd;
+ struct atmel_tdes_dev *tdes_dd;
tdes_dd = platform_get_drvdata(pdev);
if (!tdes_dd)
diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile
new file mode 100644
index 000000000000..be9a84a4b667
--- /dev/null
+++ b/drivers/crypto/axis/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
new file mode 100644
index 000000000000..d9fbbf01062b
--- /dev/null
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -0,0 +1,3192 @@
+/*
+ * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
+ *
+ * Copyright (C) 2014-2017 Axis Communications AB
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+
+/* Max length of a line in all cache levels for Artpec SoCs. */
+#define ARTPEC_CACHE_LINE_MAX 32
+
+#define PDMA_OUT_CFG 0x0000
+#define PDMA_OUT_BUF_CFG 0x0004
+#define PDMA_OUT_CMD 0x0008
+#define PDMA_OUT_DESCRQ_PUSH 0x0010
+#define PDMA_OUT_DESCRQ_STAT 0x0014
+
+#define A6_PDMA_IN_CFG 0x0028
+#define A6_PDMA_IN_BUF_CFG 0x002c
+#define A6_PDMA_IN_CMD 0x0030
+#define A6_PDMA_IN_STATQ_PUSH 0x0038
+#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
+#define A6_PDMA_IN_DESCRQ_STAT 0x0048
+#define A6_PDMA_INTR_MASK 0x0068
+#define A6_PDMA_ACK_INTR 0x006c
+#define A6_PDMA_MASKED_INTR 0x0074
+
+#define A7_PDMA_IN_CFG 0x002c
+#define A7_PDMA_IN_BUF_CFG 0x0030
+#define A7_PDMA_IN_CMD 0x0034
+#define A7_PDMA_IN_STATQ_PUSH 0x003c
+#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
+#define A7_PDMA_IN_DESCRQ_STAT 0x004C
+#define A7_PDMA_INTR_MASK 0x006c
+#define A7_PDMA_ACK_INTR 0x0070
+#define A7_PDMA_MASKED_INTR 0x0078
+
+#define PDMA_OUT_CFG_EN BIT(0)
+
+#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+
+#define PDMA_OUT_CMD_START BIT(0)
+#define A6_PDMA_OUT_CMD_STOP BIT(3)
+#define A7_PDMA_OUT_CMD_STOP BIT(2)
+
+#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define PDMA_IN_CFG_EN BIT(0)
+
+#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
+
+#define PDMA_IN_CMD_START BIT(0)
+#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
+#define A6_PDMA_IN_CMD_STOP BIT(3)
+#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
+#define A7_PDMA_IN_CMD_STOP BIT(2)
+
+#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
+#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
+#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
+
+#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
+#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
+#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
+
+#define A6_CRY_MD_OPER GENMASK(19, 16)
+
+#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
+#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
+
+#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
+#define A6_CRY_MD_CIPHER_DECR BIT(22)
+#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
+#define A6_CRY_MD_CIPHER_DSEQ BIT(24)
+
+#define A7_CRY_MD_OPER GENMASK(11, 8)
+
+#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
+#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
+
+#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
+#define A7_CRY_MD_CIPHER_DECR BIT(14)
+#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
+#define A7_CRY_MD_CIPHER_DSEQ BIT(16)
+
+/* DMA metadata constants */
+#define regk_crypto_aes_cbc 0x00000002
+#define regk_crypto_aes_ctr 0x00000003
+#define regk_crypto_aes_ecb 0x00000001
+#define regk_crypto_aes_gcm 0x00000004
+#define regk_crypto_aes_xts 0x00000005
+#define regk_crypto_cache 0x00000002
+#define a6_regk_crypto_dlkey 0x0000000a
+#define a7_regk_crypto_dlkey 0x0000000e
+#define regk_crypto_ext 0x00000001
+#define regk_crypto_hmac_sha1 0x00000007
+#define regk_crypto_hmac_sha256 0x00000009
+#define regk_crypto_hmac_sha384 0x0000000b
+#define regk_crypto_hmac_sha512 0x0000000d
+#define regk_crypto_init 0x00000000
+#define regk_crypto_key_128 0x00000000
+#define regk_crypto_key_192 0x00000001
+#define regk_crypto_key_256 0x00000002
+#define regk_crypto_null 0x00000000
+#define regk_crypto_sha1 0x00000006
+#define regk_crypto_sha256 0x00000008
+#define regk_crypto_sha384 0x0000000a
+#define regk_crypto_sha512 0x0000000c
+
+/* DMA descriptor structures */
+struct pdma_descr_ctrl {
+ unsigned char short_descr : 1;
+ unsigned char pad1 : 1;
+ unsigned char eop : 1;
+ unsigned char intr : 1;
+ unsigned char short_len : 3;
+ unsigned char pad2 : 1;
+} __packed;
+
+struct pdma_data_descr {
+ unsigned int len : 24;
+ unsigned int buf : 32;
+} __packed;
+
+struct pdma_short_descr {
+ unsigned char data[7];
+} __packed;
+
+struct pdma_descr {
+ struct pdma_descr_ctrl ctrl;
+ union {
+ struct pdma_data_descr data;
+ struct pdma_short_descr shrt;
+ };
+};
+
+struct pdma_stat_descr {
+ unsigned char pad1 : 1;
+ unsigned char pad2 : 1;
+ unsigned char eop : 1;
+ unsigned char pad3 : 5;
+ unsigned int len : 24;
+};
+
+/* Each descriptor array can hold max 64 entries */
+#define PDMA_DESCR_COUNT 64
+
+#define MODULE_NAME "Artpec-6 CA"
+
+/* Hash modes (including HMAC variants) */
+#define ARTPEC6_CRYPTO_HASH_SHA1 1
+#define ARTPEC6_CRYPTO_HASH_SHA256 2
+#define ARTPEC6_CRYPTO_HASH_SHA384 3
+#define ARTPEC6_CRYPTO_HASH_SHA512 4
+
+/* Crypto modes */
+#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
+#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
+#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
+#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
+
+/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
+ * It operates on a descriptor array with up to 64 descriptor entries.
+ * The arrays must be 64 byte aligned in memory.
+ *
+ * The ciphering unit has no registers and is completely controlled by
+ * a 4-byte metadata word that is inserted at the beginning of each dma packet.
+ *
+ * A dma packet is a sequence of descriptors terminated by setting the .eop
+ * field in the final descriptor of the packet.
+ *
+ * Multiple packets are used for providing context data, key data and
+ * the plain/ciphertext.
+ *
+ * PDMA Descriptors (Array)
+ * +------+------+------+~~+-------+------+----
+ * | 0 | 1 | 2 |~~| 11 EOP| 12 | ....
+ * +--+---+--+---+----+-+~~+-------+----+-+----
+ * | | | | |
+ * | | | | |
+ * __|__ +-------++-------++-------+ +----+
+ * | MD | |Payload||Payload||Payload| | MD |
+ * +-----+ +-------++-------++-------+ +----+
+ */
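
Illustrative only: the 4-byte metadata word can be composed from the field masks above with the <linux/bitfield.h> helpers. The values and variable name here are made up for the sketch; the real packing lives in the request-preparation code later in this file.

	u32 md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
		 FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128);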
+
+struct artpec6_crypto_bounce_buffer {
+ struct list_head list;
+ size_t length;
+ struct scatterlist *sg;
+ size_t offset;
+ /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
+ * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
+ */
+ void *buf;
+};
+
+struct artpec6_crypto_dma_map {
+ dma_addr_t dma_addr;
+ size_t size;
+ enum dma_data_direction dir;
+};
+
+struct artpec6_crypto_dma_descriptors {
+ struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
+ struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
+ u32 stat[PDMA_DESCR_COUNT] __aligned(64);
+ struct list_head bounce_buffers;
+ /* Enough maps for all out/in buffers, and all three descr. arrays */
+ struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
+ dma_addr_t out_dma_addr;
+ dma_addr_t in_dma_addr;
+ dma_addr_t stat_dma_addr;
+ size_t out_cnt;
+ size_t in_cnt;
+ size_t map_count;
+};
+
+enum artpec6_crypto_variant {
+ ARTPEC6_CRYPTO,
+ ARTPEC7_CRYPTO,
+};
+
+struct artpec6_crypto {
+ void __iomem *base;
+ spinlock_t queue_lock;
+ struct list_head queue; /* waiting for pdma fifo space */
+ struct list_head pending; /* submitted to pdma fifo */
+ struct tasklet_struct task;
+ struct kmem_cache *dma_cache;
+ int pending_count;
+ struct timer_list timer;
+ enum artpec6_crypto_variant variant;
+ void *pad_buffer; /* cache-aligned block padding buffer */
+ void *zero_buffer;
+};
+
+enum artpec6_crypto_hash_flags {
+ HASH_FLAG_INIT_CTX = 2,
+ HASH_FLAG_UPDATE = 4,
+ HASH_FLAG_FINALIZE = 8,
+ HASH_FLAG_HMAC = 16,
+ HASH_FLAG_UPDATE_KEY = 32,
+};
+
+struct artpec6_crypto_req_common {
+ struct list_head list;
+ struct artpec6_crypto_dma_descriptors *dma;
+ struct crypto_async_request *req;
+ void (*complete)(struct crypto_async_request *req);
+ gfp_t gfp_flags;
+};
+
+struct artpec6_hash_request_context {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ char partial_buffer_out[SHA512_BLOCK_SIZE];
+ char key_buffer[SHA512_BLOCK_SIZE];
+ char pad_buffer[SHA512_BLOCK_SIZE + 32];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ u32 key_md;
+ u32 hash_md;
+ enum artpec6_crypto_hash_flags hash_flags;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_hash_export_state {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ int oper;
+ unsigned int hash_flags;
+};
+
+struct artpec6_hashalg_context {
+ char hmac_key[SHA512_BLOCK_SIZE];
+ size_t hmac_key_length;
+ struct crypto_shash *child_hash;
+};
+
+struct artpec6_crypto_request_context {
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_cryptotfm_context {
+ unsigned char aes_key[2*AES_MAX_KEY_SIZE];
+ size_t key_length;
+ u32 key_md;
+ int crypto_type;
+ struct crypto_skcipher *fallback;
+};
+
+struct artpec6_crypto_aead_hw_ctx {
+ __be64 aad_length_bits;
+ __be64 text_length_bits;
+ __u8 J0[AES_BLOCK_SIZE];
+};
+
+struct artpec6_crypto_aead_req_ctx {
+ struct artpec6_crypto_aead_hw_ctx hw_ctx;
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+ __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
+};
+
+/* The crypto framework makes it hard to avoid this global. */
+static struct device *artpec6_crypto_dev;
+
+static struct dentry *dbgfs_root;
+
+#ifdef CONFIG_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
+#endif
+
+enum {
+ ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
+ ARTPEC6_CRYPTO_PREPARE_HASH_START,
+};
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq);
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
+
+static void
+artpec6_crypto_complete_crypto(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_aead(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_hash(struct crypto_async_request *req);
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
+
+static void
+artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
+
+struct artpec6_crypto_walk {
+ struct scatterlist *sg;
+ size_t offset;
+};
+
+static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
+ struct scatterlist *sg)
+{
+ awalk->sg = sg;
+ awalk->offset = 0;
+}
+
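+/* Advance the walk by up to @nbytes. Returns the number of bytes that could
+ * not be consumed because the scatterlist ended.
+ */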
+static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
+ size_t nbytes)
+{
+ while (nbytes && awalk->sg) {
+ size_t piece;
+
+ WARN_ON(awalk->offset > awalk->sg->length);
+
+ piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
+ nbytes -= piece;
+ awalk->offset += piece;
+ if (awalk->offset == awalk->sg->length) {
+ awalk->sg = sg_next(awalk->sg);
+ awalk->offset = 0;
+ }
+	}
+
+ return nbytes;
+}
+
+static size_t
+artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
+{
+ WARN_ON(awalk->sg->length == awalk->offset);
+
+ return awalk->sg->length - awalk->offset;
+}
+
+static dma_addr_t
+artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
+{
+ return sg_phys(awalk->sg) + awalk->offset;
+}
+
+static void
+artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
+ b, b->length, b->offset, b->buf);
+ sg_pcopy_from_buffer(b->sg,
+ 1,
+ b->buf,
+ b->length,
+ b->offset);
+
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
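+/* Cap the number of in-flight jobs; the bound of six presumably leaves
+ * headroom in the PDMA descriptor FIFO.
+ */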
+static inline bool artpec6_crypto_busy(void)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int fifo_count = ac->pending_count;
+
+ return fifo_count > 6;
+}
+
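+/* Queue a prepared request for the PDMA engine.
+ *
+ * Returns -EINPROGRESS if the job was started, or -EBUSY if it was either
+ * backlogged (CRYPTO_TFM_REQ_MAY_BACKLOG set) or dropped and destroyed.
+ */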
+static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int ret = -EBUSY;
+
+ spin_lock_bh(&ac->queue_lock);
+
+ if (!artpec6_crypto_busy()) {
+ list_add_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+ ret = -EINPROGRESS;
+ } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ list_add_tail(&req->list, &ac->queue);
+ } else {
+ artpec6_crypto_common_destroy(req);
+ }
+
+ spin_unlock_bh(&ac->queue_lock);
+
+ return ret;
+}
+
+static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ u32 ind, statd, outd;
+
+ /* Make descriptor content visible to the DMA before starting it. */
+ wmb();
+
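+	/* The push registers take 64-byte-aligned addresses with the low six
+	 * bits stripped, hence the __aligned(64) descriptor arrays and the
+	 * shifts below.
+	 */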
+ ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
+
+ statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
+
+ outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
+ FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
+ } else {
+ writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
+ }
+
+ writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
+ writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
+
+ ac->pending_count++;
+}
+
+static void
+artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+
+ dma->out_cnt = 0;
+ dma->in_cnt = 0;
+ dma->map_count = 0;
+ INIT_LIST_HEAD(&dma->bounce_buffers);
+}
+
+static bool fault_inject_dma_descr(void)
+{
+#ifdef CONFIG_FAULT_INJECTION
+ return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
+#else
+ return false;
+#endif
+}
+
+/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success or -ENOSPC if there are no more descriptors available
+ */
+static int
+artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, size_t len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 0;
+ d->ctrl.eop = eop;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data, must be between 1 and 7 bytes
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success
+ * -ENOSPC if no more descriptors are available
+ * -EINVAL if the data length exceeds 7 bytes
+ */
+static int
+artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ } else if (len > 7 || len < 1) {
+ return -EINVAL;
+ }
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 1;
+ d->ctrl.short_len = len;
+ d->ctrl.eop = eop;
+ memcpy(d->shrt.data, dst, len);
+ return 0;
+}
+
+static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
+ struct page *page, size_t offset,
+ size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ struct artpec6_crypto_dma_map *map;
+ dma_addr_t dma_addr;
+
+ *dma_addr_out = 0;
+
+ if (dma->map_count >= ARRAY_SIZE(dma->maps))
+ return -ENOMEM;
+
+ dma_addr = dma_map_page(dev, page, offset, size, dir);
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+
+ map = &dma->maps[dma->map_count++];
+ map->size = size;
+ map->dma_addr = dma_addr;
+ map->dir = dir;
+
+ *dma_addr_out = dma_addr;
+
+ return 0;
+}
+
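+/* Map a kernel virtual address for DMA. Note that virt_to_page() is only
+ * valid for linearly mapped (lowmem) memory, so callers must not pass
+ * vmalloc'd addresses here.
+ */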
+static int
+artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
+ void *ptr, size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct page *page = virt_to_page(ptr);
+ size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
+
+ return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
+ dma_addr_out);
+}
+
+static int
+artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->in,
+ sizeof(dma->in[0]) * dma->in_cnt,
+ DMA_TO_DEVICE, &dma->in_dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->out,
+ sizeof(dma->out[0]) * dma->out_cnt,
+ DMA_TO_DEVICE, &dma->out_dma_addr);
+ if (ret)
+ return ret;
+
+ /* We only read one stat descriptor */
+ dma->stat[dma->in_cnt - 1] = 0;
+
+ /*
+ * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
+ * to be written.
+ */
+ return artpec6_crypto_dma_map_single(common,
+ dma->stat + dma->in_cnt - 1,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL,
+ &dma->stat_dma_addr);
+}
+
+static void
+artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ int i;
+
+ for (i = 0; i < dma->map_count; i++) {
+ struct artpec6_crypto_dma_map *map = &dma->maps[i];
+
+ dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
+ }
+
+ dma->map_count = 0;
+}
+
+/** artpec6_crypto_setup_out_descr - Setup an out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data
+ * @eop: True if this is the last buffer in the packet
+ * @use_short: If this is true and the data length is less than 7 bytes then
+ *             a short descriptor will be used
+ *
+ * @return 0 on success
+ *	Any errors from artpec6_crypto_setup_out_descr_short() or
+ *	artpec6_crypto_setup_out_descr_phys()
+ */
+static int
+artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop,
+ bool use_short)
+{
+ if (use_short && len < 7) {
+ return artpec6_crypto_setup_out_descr_short(common, dst, len,
+ eop);
+ } else {
+ int ret;
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_single(common, dst, len,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
+ len, eop);
+ }
+}
+
+/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @intr: True if an interrupt should be fired after HW processing of this
+ * descriptor
+ *
+ */
+static int
+artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, unsigned int len, bool intr)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->in_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free IN DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+ d = &dma->in[dma->in_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.intr = intr;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
+ *
+ * @buffer: The virtual address of the data buffer
+ * @len: The length of the data buffer
+ * @last: True if this is the last data buffer in the request (i.e. an
+ *        interrupt is needed)
+ *
+ * Short descriptors are not used for the in channel
+ */
+static int
+artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
+ void *buffer, unsigned int len, bool last)
+{
+ dma_addr_t dma_addr;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, buffer, len,
+ DMA_FROM_DEVICE, &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
+}
+
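+/* Allocate a bounce buffer together with its bookkeeping struct. The extra
+ * 2 * ARTPEC_CACHE_LINE_MAX bytes guarantee that an aligned region of at
+ * least ARTPEC_CACHE_LINE_MAX bytes fits after the struct.
+ */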
+static struct artpec6_crypto_bounce_buffer *
+artpec6_crypto_alloc_bounce(gfp_t flags)
+{
+ void *base;
+ size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
+ 2 * ARTPEC_CACHE_LINE_MAX;
+ struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
+
+ if (!bbuf)
+ return NULL;
+
+ base = bbuf + 1;
+ bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
+ return bbuf;
+}
+
+static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk, size_t size)
+{
+ struct artpec6_crypto_bounce_buffer *bbuf;
+ int ret;
+
+ bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
+ if (!bbuf)
+ return -ENOMEM;
+
+ bbuf->length = size;
+ bbuf->sg = walk->sg;
+ bbuf->offset = walk->offset;
+
+ ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
+ if (ret) {
+ kfree(bbuf);
+ return ret;
+ }
+
+ pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
+ list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
+ return 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+		/* When destination buffers are not aligned to the cache line
+		 * size we need bounce buffers. The DMA-API requires that the
+		 * entire cache line is owned by the DMA buffer; this also
+		 * holds when coherent DMA is used.
+		 */
+ if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
+ chunk = min_t(dma_addr_t, chunk,
+ ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
+ addr);
+
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
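+			/* Aligned chunks shorter than a full cache line must
+			 * also be bounced, since the DMA buffer would not own
+			 * the whole line.
+			 */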
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else {
+ dma_addr_t dma_addr;
+
+ chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
+
+ pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_FROM_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+ pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
+
+ if (addr & 3) {
+ char buf[3];
+
+ chunk = min_t(size_t, chunk, (4-(addr&3)));
+
+ sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
+ walk->offset);
+
+ ret = artpec6_crypto_setup_out_descr_short(common, buf,
+ chunk,
+ false);
+ } else {
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
+
+/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
+ *
+ * If the out descriptor list is non-empty, then the eop flag on the
+ * last used out descriptor will be set.
+ *
+ * @return 0 on success
+ *	-EINVAL if the out descriptor list is empty or has overflowed
+ */
+static int
+artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
+		pr_err("%s: OUT descriptor list is %s\n",
+			MODULE_NAME, dma->out_cnt ? "full" : "empty");
+		return -EINVAL;
+	}
+
+ d = &dma->out[dma->out_cnt-1];
+ d->ctrl.eop = 1;
+
+ return 0;
+}
+
+/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
+ * in descriptor
+ *
+ * See artpec6_crypto_terminate_out_descrs() for return values
+ */
+static int
+artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
+		pr_err("%s: IN descriptor list is %s\n",
+			MODULE_NAME, dma->in_cnt ? "full" : "empty");
+ return -EINVAL;
+ }
+
+ d = &dma->in[dma->in_cnt-1];
+ d->ctrl.intr = 1;
+ return 0;
+}
+
+/** create_hash_pad - Create a Secure Hash conformant pad
+ *
+ * @dst: The destination buffer to write the pad to. It must be large enough
+ *       to hold the full pad (up to SHA512_BLOCK_SIZE + 16 bytes)
+ * @dgstlen: The number of message bytes hashed so far
+ * @bitcount: The total message length in bits
+ *
+ * @return The total number of padding bytes written to @dst
+ */
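+/* For example, hashing 3 message bytes with SHA-256: mod = 64, target = 55,
+ * diff = 3, so pad_bytes = 52 and 1 + 52 + 8 = 61 bytes are written, which
+ * brings the total message length to a 64-byte multiple.
+ */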
+static size_t
+create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
+{
+ unsigned int mod, target, diff, pad_bytes, size_bytes;
+ __be64 bits = __cpu_to_be64(bitcount);
+
+ switch (oper) {
+ case regk_crypto_sha1:
+ case regk_crypto_sha256:
+ case regk_crypto_hmac_sha1:
+ case regk_crypto_hmac_sha256:
+ target = 448 / 8;
+ mod = 512 / 8;
+ size_bytes = 8;
+ break;
+ default:
+ target = 896 / 8;
+ mod = 1024 / 8;
+ size_bytes = 16;
+ break;
+ }
+
+ target -= 1;
+ diff = dgstlen & (mod - 1);
+ pad_bytes = diff > target ? target + mod - diff : target - diff;
+
+ memset(dst + 1, 0, pad_bytes);
+ dst[0] = 0x80;
+
+ if (size_bytes == 16) {
+ memset(dst + 1 + pad_bytes, 0, 8);
+ memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
+ } else {
+ memcpy(dst + 1 + pad_bytes, &bits, 8);
+ }
+
+ return pad_bytes + size_bytes + 1;
+}
+
+static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
+ struct crypto_async_request *parent,
+ void (*complete)(struct crypto_async_request *req),
+ struct scatterlist *dstsg, unsigned int nbytes)
+{
+ gfp_t flags;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ common->gfp_flags = flags;
+ common->dma = kmem_cache_alloc(ac->dma_cache, flags);
+ if (!common->dma)
+ return -ENOMEM;
+
+ common->req = parent;
+ common->complete = complete;
+ return 0;
+}
+
+static void
+artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
+{
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ kfree(b);
+ }
+}
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ artpec6_crypto_dma_unmap_all(common);
+ artpec6_crypto_bounce_destroy(common->dma);
+ kmem_cache_free(ac->dma_cache, common->dma);
+ common->dma = NULL;
+ return 0;
+}
+
+/*
+ * Ciphering functions.
+ */
+static int artpec6_crypto_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+ int ret;
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+		req_ctx->decrypt = false;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_encrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_decrypt(struct skcipher_request *req)
+{
+ int ret;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+		req_ctx->decrypt = true;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_decrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int
+artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)
+ (req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+	/*
+	 * The hardware uses only the last 32 bits of the IV as the counter,
+	 * while the kernel tests (aes_ctr_enc_tv_template[4] for example)
+	 * expect that the whole IV is a counter. So fall back to the software
+	 * implementation if the counter is going to overflow.
+	 */
+ if (counter + nblks < counter) {
+ int ret;
+
+		pr_debug("counter %x will overflow (nblks %u), falling back\n",
+			 counter, nblks);
+
+ ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
+ if (ret)
+ return ret;
+
+ {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ }
+ return ret;
+ }
+
+ return encrypt ? artpec6_crypto_encrypt(req)
+ : artpec6_crypto_decrypt(req);
+}
+
+static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, true);
+}
+
+static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, false);
+}
+
+/*
+ * AEAD functions
+ */
+static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
+{
+ struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ crypto_aead_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_aead_req_ctx));
+
+ return 0;
+}
+
+static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (len != 16 && len != 24 && len != 32) {
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+ }
+
+ ctx->key_length = len;
+
+ memcpy(ctx->aes_key, key, len);
+ return 0;
+}
+
+static int artpec6_crypto_aead_encrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = false;
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_aead_decrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = true;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
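+/* Build the PDMA descriptor lists for one hash operation. The OUT list is
+ * roughly [KEY_MD][KEY]<eop> (HMAC only), then [HASH_MD]([CTX])([DATA])([PAD]),
+ * and the IN list receives a 4-byte word that is discarded, followed by
+ * either the final digest or the saved context.
+ */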
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
+{
+ struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
+ size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+ size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
+ SHA512_DIGEST_SIZE : digestsize;
+ size_t blocksize = crypto_tfm_alg_blocksize(
+ crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 sel_ctx;
+ bool ext_ctx = false;
+ bool run_hw = false;
+ int error = 0;
+
+ artpec6_crypto_init_dma_operation(common);
+
+	/* Upload the HMAC key; it must be the first packet */
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+
+ /* Copy and pad up the key */
+ memcpy(req_ctx->key_buffer, ctx->hmac_key,
+ ctx->hmac_key_length);
+ memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
+ blocksize - ctx->hmac_key_length);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->key_md,
+ sizeof(req_ctx->key_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->key_buffer, blocksize,
+ true, false);
+ if (error)
+ return error;
+ }
+
+ if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
+ /* Restore context */
+ sel_ctx = regk_crypto_ext;
+ ext_ctx = true;
+ } else {
+ sel_ctx = regk_crypto_init;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
+ } else {
+ req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
+ }
+
+	/* Set up the metadata descriptors */
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->hash_md,
+ sizeof(req_ctx->hash_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (error)
+ return error;
+
+ if (ext_ctx) {
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->digeststate,
+ contextsize, false, false);
+
+ if (error)
+ return error;
+ }
+
+ if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
+ size_t done_bytes = 0;
+ size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
+ size_t ready_bytes = round_down(total_bytes, blocksize);
+ struct artpec6_crypto_walk walk;
+
+ run_hw = ready_bytes > 0;
+ if (req_ctx->partial_bytes && ready_bytes) {
+			/* We have a partial buffer and will send at least
+			 * some bytes to the HW. Empty this partial buffer
+			 * before tackling the SG lists.
+			 */
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ /* Reset partial buffer */
+ done_bytes += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
+ ready_bytes -
+ done_bytes);
+ if (error)
+ return error;
+
+ if (walk.sg) {
+ size_t sg_skip = ready_bytes - done_bytes;
+ size_t sg_rem = areq->nbytes - sg_skip;
+
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req_ctx->partial_buffer +
+ req_ctx->partial_bytes,
+ sg_rem, sg_skip);
+
+ req_ctx->partial_bytes += sg_rem;
+ }
+
+ req_ctx->digcnt += ready_bytes;
+ req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
+ }
+
+ /* Finalize */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
+ bool needtrim = contextsize != digestsize;
+ size_t hash_pad_len;
+ u64 digest_bits;
+ u32 oper;
+
+ if (variant == ARTPEC6_CRYPTO)
+ oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
+ else
+ oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
+
+ /* Write out the partial buffer if present */
+ if (req_ctx->partial_bytes) {
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ req_ctx->digcnt += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC)
+ digest_bits = 8 * (req_ctx->digcnt + blocksize);
+ else
+ digest_bits = 8 * req_ctx->digcnt;
+
+ /* Add the hash pad */
+ hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
+ req_ctx->digcnt, digest_bits);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->pad_buffer,
+ hash_pad_len, false,
+ true);
+ req_ctx->digcnt = 0;
+
+ if (error)
+ return error;
+
+ /* Descriptor for the final result */
+ error = artpec6_crypto_setup_in_descr(common, areq->result,
+ digestsize,
+ !needtrim);
+ if (error)
+ return error;
+
+ if (needtrim) {
+ /* Discard the extra context bytes for SHA-384 */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->partial_buffer,
+ digestsize - contextsize, true);
+ if (error)
+ return error;
+ }
+
+ } else { /* This is not the final operation for this request */
+ if (!run_hw)
+ return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
+
+ /* Save the result to the context */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->digeststate,
+ contextsize, false);
+ if (error)
+ return error;
+		/* fall through to the termination below */
+ }
+
+ req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
+ HASH_FLAG_FINALIZE);
+
+ error = artpec6_crypto_terminate_in_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_terminate_out_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_dma_map_descs(common);
+ if (error)
+ return error;
+
+ return ARTPEC6_CRYPTO_PREPARE_HASH_START;
+}
+
+static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
+
+ return 0;
+}
+
+static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+ artpec6_crypto_aes_exit(tfm);
+}
+
+static int
+artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+static int
+artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+ int ret;
+
+ ret = xts_check_key(&cipher->base, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case 32:
+ case 48:
+ case 64:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
+ *
+ * @areq: The asynchronous request to process
+ *
+ * @return 0 if the dma job was successfully prepared
+ *	  <0 on error
+ *
+ * This function sets up the PDMA descriptors for a block cipher request.
+ *
+ * The required padding is added for AES-CTR using a statically defined
+ * buffer.
+ *
+ * The PDMA descriptor list will be as follows:
+ *
+ * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
+ * IN: <CIPHER_MD><data_0>...[data_n]<intr>
+ *
+ */
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
+{
+ int ret;
+ struct artpec6_crypto_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_crypto_req_common *common;
+ bool cipher_decr = false;
+ size_t cipher_klen;
+ u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
+ u32 oper;
+
+ req_ctx = skcipher_request_ctx(areq);
+ common = &req_ctx->common;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
+ else
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
+
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
+ cipher_klen = ctx->key_length/2;
+ else
+ cipher_klen = ctx->key_length;
+
+ /* Metadata */
+ switch (cipher_klen) {
+ case 16:
+ cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ cipher_len = regk_crypto_key_256;
+ break;
+ default:
+		pr_err("%s: Invalid key length %zu!\n",
+		       MODULE_NAME, ctx->key_length);
+ return -EINVAL;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ oper = regk_crypto_aes_ecb;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ oper = regk_crypto_aes_cbc;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
+ oper = regk_crypto_aes_ctr;
+ cipher_decr = false;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ oper = regk_crypto_aes_xts;
+ cipher_decr = req_ctx->decrypt;
+
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
+ else
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
+ break;
+
+ default:
+ pr_err("%s: Invalid cipher mode %d!\n",
+ MODULE_NAME, ctx->crypto_type);
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md),
+ false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+ if (iv_len) {
+ ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
+ false, false);
+ if (ret)
+ return ret;
+ }
+ /* Data out */
+ artpec6_crypto_walk_init(&walk, areq->src);
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+ /* Data in */
+ artpec6_crypto_walk_init(&walk, areq->dst);
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+ /* CTR-mode padding required by the HW. */
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
+ ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
+ size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
+ areq->cryptlen;
+
+ if (pad) {
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->pad_buffer,
+ pad, false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer, pad,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+{
+ size_t count;
+ int ret;
+ size_t input_length;
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+ struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 md_cipher_len;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ /* Key */
+ if (variant == ARTPEC6_CRYPTO) {
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ switch (ctx->key_length) {
+ case 16:
+ md_cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ md_cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ md_cipher_len = regk_crypto_key_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ (void *) &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md), false,
+ false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+ /* For the decryption, cryptlen includes the tag. */
+ input_length = areq->cryptlen;
+ if (req_ctx->decrypt)
+ input_length -= AES_BLOCK_SIZE;
+
+ /* Prepare the context buffer */
+ req_ctx->hw_ctx.aad_length_bits =
+ __cpu_to_be64(8*areq->assoclen);
+
+ req_ctx->hw_ctx.text_length_bits =
+ __cpu_to_be64(8*input_length);
+
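+	/*
+	 * With a 96-bit IV, GCM defines J0 as the IV followed by a 32-bit
+	 * big-endian block counter, and the first counter block used for
+	 * encryption is inc32(J0).
+	 */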
+ memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
+	/* The HW omits the initial increment of the counter field. */
+ crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+
+ ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
+ sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
+ if (ret)
+ return ret;
+
+ {
+ struct artpec6_crypto_walk walk;
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ /* Associated data */
+ count = areq->assoclen;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(areq->assoclen, 16)) {
+ size_t assoc_pad = 16 - (areq->assoclen % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ assoc_pad, false,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ /* Data to crypto */
+ count = input_length;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(input_length, 16)) {
+ size_t crypto_pad = 16 - (input_length % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ crypto_pad,
+ false,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Data from crypto */
+ {
+ struct artpec6_crypto_walk walk;
+ size_t output_len = areq->cryptlen;
+
+ if (req_ctx->decrypt)
+ output_len -= AES_BLOCK_SIZE;
+
+ artpec6_crypto_walk_init(&walk, areq->dst);
+
+ /* skip associated data in the output */
+ count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
+ if (count)
+ return -EINVAL;
+
+ count = output_len;
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
+ if (ret)
+ return ret;
+
+ /* Put padding between the cryptotext and the auth tag */
+ if (!IS_ALIGNED(output_len, 16)) {
+ size_t crypto_pad = 16 - (output_len % 16);
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer,
+ crypto_pad, false);
+ if (ret)
+ return ret;
+ }
+
+ /* The authentication tag shall follow immediately after
+ * the output ciphertext. For decryption it is put in a context
+ * buffer for later compare against the input tag.
+ */
+ count = AES_BLOCK_SIZE;
+
+ if (req_ctx->decrypt) {
+ ret = artpec6_crypto_setup_in_descr(common,
+ req_ctx->decryption_tag, count, false);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
+ count);
+ if (ret)
+ return ret;
+ }
+
+ }
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
+
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+{
+ struct artpec6_crypto_req_common *req;
+
+ while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
+ req = list_first_entry(&ac->queue,
+ struct artpec6_crypto_req_common,
+ list);
+ list_move_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+
+ req->req->complete(req->req, -EINPROGRESS);
+ }
+
+	/*
+	 * In some cases, the hardware can raise an in_eop_flush interrupt
+	 * before actually updating the status, so we have a timer which will
+	 * recheck the status on timeout. Since the cases are expected to be
+	 * very rare, we use a relatively large timeout value. There should be
+	 * no noticeable negative effect if we time out spuriously.
+	 */
+ if (ac->pending_count)
+ mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
+ else
+ del_timer(&ac->timer);
+}
+
+static void artpec6_crypto_timeout(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
+
+ dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
+
+ tasklet_schedule(&ac->task);
+}
+
+static void artpec6_crypto_task(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+ struct artpec6_crypto_req_common *req;
+ struct artpec6_crypto_req_common *n;
+
+ if (list_empty(&ac->pending)) {
+ pr_debug("Spurious IRQ\n");
+ return;
+ }
+
+ spin_lock_bh(&ac->queue_lock);
+
+ list_for_each_entry_safe(req, n, &ac->pending, list) {
+ struct artpec6_crypto_dma_descriptors *dma = req->dma;
+ u32 stat;
+
+ dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL);
+
+ stat = req->dma->stat[req->dma->in_cnt-1];
+
+ /* A non-zero final status descriptor indicates
+ * this job has finished.
+ */
+ pr_debug("Request %p status is %X\n", req, stat);
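+		/* Jobs are started and, presumably, completed in submission
+		 * order, so stop at the first one still pending.
+		 */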
+ if (!stat)
+ break;
+
+ /* Allow testing of timeout handling with fault injection */
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&artpec6_crypto_fail_status_read, 1))
+ continue;
+#endif
+
+ pr_debug("Completing request %p\n", req);
+
+ list_del(&req->list);
+
+ artpec6_crypto_dma_unmap_all(req);
+ artpec6_crypto_copy_bounce_buffers(req);
+
+ ac->pending_count--;
+ artpec6_crypto_common_destroy(req);
+ req->complete(req->req);
+ }
+
+ artpec6_crypto_process_queue(ac);
+
+ spin_unlock_bh(&ac->queue_lock);
+}
+
+static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
+{
+ int result = 0;
+
+ /* Verify GCM hashtag. */
+ struct aead_request *areq = container_of(req,
+ struct aead_request, base);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+
+ if (req_ctx->decrypt) {
+ u8 input_tag[AES_BLOCK_SIZE];
+
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ input_tag,
+ AES_BLOCK_SIZE,
+ areq->assoclen + areq->cryptlen -
+ AES_BLOCK_SIZE);
+
+ if (memcmp(req_ctx->decryption_tag,
+ input_tag,
+ AES_BLOCK_SIZE)) {
+ pr_debug("***EBADMSG:\n");
+ print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
+ input_tag, AES_BLOCK_SIZE, true);
+ print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
+ req_ctx->decryption_tag,
+ AES_BLOCK_SIZE, true);
+
+ result = -EBADMSG;
+ }
+ }
+
+ req->complete(req, result);
+}
+
+static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+/*------------------- Hash functions -----------------------------------------*/
+static int
+artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
+ size_t blocksize;
+ int ret;
+
+ if (!keylen) {
+		pr_err("Invalid length (%u) of HMAC key\n",
+		       keylen);
+ return -EINVAL;
+ }
+
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
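+	/* As in RFC 2104, keys longer than the block size are first hashed
+	 * down to the digest size; shorter keys are used as-is and are
+	 * zero-padded to the block size when uploaded to the HW.
+	 */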
+ if (keylen > blocksize) {
+ SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
+
+ hdesc->tfm = tfm_ctx->child_hash;
+ hdesc->flags = crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ tfm_ctx->hmac_key_length = blocksize;
+ ret = crypto_shash_digest(hdesc, key, keylen,
+ tfm_ctx->hmac_key);
+ if (ret)
+ return ret;
+
+ } else {
+ memcpy(tfm_ctx->hmac_key, key, keylen);
+ tfm_ctx->hmac_key_length = keylen;
+ }
+
+ return 0;
+}
+
+static int
+artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ u32 oper;
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
+ if (hmac)
+ req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
+
+ switch (type) {
+ case ARTPEC6_CRYPTO_HASH_SHA1:
+ oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA256:
+ oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA384:
+ oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA512:
+ oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
+ break;
+
+ default:
+ pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
+ else
+ req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
+
+ return 0;
+}
+
+static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ int ret;
+
+ if (!req_ctx->common.dma) {
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_hash,
+ NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = artpec6_crypto_prepare_hash(req);
+ switch (ret) {
+ case ARTPEC6_CRYPTO_PREPARE_HASH_START:
+ ret = artpec6_crypto_submit(&req_ctx->common);
+ break;
+
+ case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
+ ret = 0;
+ /* Fallthrough */
+
+ default:
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ break;
+ }
+
+ return ret;
+}
+
+static int artpec6_crypto_hash_final(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hash_update(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha1_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+}
+
+static int artpec6_crypto_sha1_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+}
+
+static int artpec6_crypto_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+}
+
+static int __maybe_unused
+artpec6_crypto_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+}
+
+static int artpec6_crypto_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+}
+
+static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+}
+
+static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
+ const char *base_hash_name)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct artpec6_hash_request_context));
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ if (base_hash_name) {
+ struct crypto_shash *child;
+
+ child = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ tfm_ctx->child_hash = child;
+ }
+
+ return 0;
+}
+
+static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, NULL);
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha256");
+}
+
+static int __maybe_unused
+artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha384");
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha512");
+}
+
+static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->child_hash)
+ crypto_free_shash(tfm_ctx->child_hash);
+
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+ tfm_ctx->hmac_key_length = 0;
+}
+
+static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
+{
+ const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ struct artpec6_hash_export_state *state = out;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ BUILD_BUG_ON(sizeof(state->partial_buffer) !=
+ sizeof(ctx->partial_buffer));
+ BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
+
+ state->digcnt = ctx->digcnt;
+ state->partial_bytes = ctx->partial_bytes;
+ state->hash_flags = ctx->hash_flags;
+
+ if (variant == ARTPEC6_CRYPTO)
+ state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
+ else
+ state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
+
+ memcpy(state->partial_buffer, ctx->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(state->digeststate, ctx->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
+{
+ struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ const struct artpec6_hash_export_state *state = in;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->digcnt = state->digcnt;
+ ctx->partial_bytes = state->partial_bytes;
+ ctx->hash_flags = state->hash_flags;
+
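+	/* Rebuild the variant-specific metadata word from the saved selector */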
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
+ else
+ ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
+
+ memcpy(ctx->partial_buffer, state->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(ctx->digeststate, state->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int init_crypto_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 out_descr_buf_size;
+ u32 out_data_buf_size;
+ u32 in_data_buf_size;
+ u32 in_descr_buf_size;
+ u32 in_stat_buf_size;
+ u32 in, out;
+
+ /*
+ * The PDMA unit contains 1984 bytes of internal memory for the OUT
+ * channels and 1024 bytes for the IN channel. This is an elastic
+ * memory used to internally store the descriptors and data. The values
+	 * are specified in 64 byte increments. TrustZone buffers are not
+ * used at this stage.
+ */
+ out_data_buf_size = 16; /* 1024 bytes for data */
+ out_descr_buf_size = 15; /* 960 bytes for descriptors */
+ in_data_buf_size = 8; /* 512 bytes for data */
+ in_descr_buf_size = 4; /* 256 bytes for descriptors */
+ in_stat_buf_size = 4; /* 256 bytes for stat descrs */
+
+ BUILD_BUG_ON_MSG((out_data_buf_size
+ + out_descr_buf_size) * 64 > 1984,
+ "Invalid OUT configuration");
+
+ BUILD_BUG_ON_MSG((in_data_buf_size
+ + in_descr_buf_size
+ + in_stat_buf_size) * 64 > 1024,
+ "Invalid IN configuration");
+
+ in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
+
+ out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
+ FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
+
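+	/*
+	 * Program the buffer split and enable the DMA channels. The IN
+	 * channel registers differ between ARTPEC-6 and ARTPEC-7.
+	 */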
+ writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
+ writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
+ A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A6_PDMA_INTR_MASK);
+ } else {
+ writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
+ A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A7_PDMA_INTR_MASK);
+ }
+
+ return 0;
+}
+
+static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
+ writel_relaxed(0, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ } else {
+ writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
+ writel_relaxed(0, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ }
+
+ writel_relaxed(0, base + PDMA_OUT_CFG);
+}
+
+static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
+{
+ struct artpec6_crypto *ac = dev_id;
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 mask_in_data, mask_in_eop_flush;
+ u32 in_cmd_flush_stat, in_cmd_reg;
+ u32 ack_intr_reg;
+ u32 ack = 0;
+ u32 intr;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
+ mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A6_PDMA_IN_CMD;
+ ack_intr_reg = A6_PDMA_ACK_INTR;
+ } else {
+ intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
+ mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A7_PDMA_IN_CMD;
+ ack_intr_reg = A7_PDMA_ACK_INTR;
+ }
+
+	/* We get two interrupt notifications from each job.
+	 * The in_data interrupt means all data has been sent to memory,
+	 * after which we request a status flush command to write the
+	 * per-job status to its status vector. This ensures that the
+	 * tasklet can detect exactly how many of the submitted jobs
+	 * have finished.
+	 */
+ if (intr & mask_in_data)
+ ack |= mask_in_data;
+
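+	/* Request a status flush unless the end-of-packet flush already completed */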
+ if (intr & mask_in_eop_flush)
+ ack |= mask_in_eop_flush;
+ else
+ writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
+
+ writel_relaxed(ack, base + ack_intr_reg);
+
+ if (intr & mask_in_eop_flush)
+ tasklet_schedule(&ac->task);
+
+ return IRQ_HANDLED;
+}
+
+/*------------------- Algorithm definitions ----------------------------------*/
+
+/* Hashes */
+static struct ahash_alg hash_algos[] = {
+ /* SHA-1 */
+ {
+ .init = artpec6_crypto_sha1_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha1_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "artpec-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-256 */
+ {
+ .init = artpec6_crypto_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "artpec-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-256 */
+ {
+ .init = artpec6_crypto_hmac_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "artpec-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
+static struct ahash_alg artpec7_hash_algos[] = {
+ /* SHA-384 */
+ {
+ .init = artpec6_crypto_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "artpec-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-384 */
+ {
+ .init = artpec6_crypto_hmac_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "artpec-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-512 */
+ {
+ .init = artpec6_crypto_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "artpec-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-512 */
+ {
+ .init = artpec6_crypto_hmac_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "artpec-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
+/* Crypto */
+static struct skcipher_alg crypto_algos[] = {
+ /* AES - ECB */
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "artpec6-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_ecb_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+ /* AES - CTR */
+ {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "artpec6-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_ctr_encrypt,
+ .decrypt = artpec6_crypto_ctr_decrypt,
+ .init = artpec6_crypto_aes_ctr_init,
+ .exit = artpec6_crypto_aes_ctr_exit,
+ },
+ /* AES - CBC */
+ {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "artpec6-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_cbc_init,
+ .exit = artpec6_crypto_aes_exit
+ },
+ /* AES - XTS */
+ {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "artpec6-xts-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2*AES_MIN_KEY_SIZE,
+ .max_keysize = 2*AES_MAX_KEY_SIZE,
+ .ivsize = 16,
+ .setkey = artpec6_crypto_xts_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_xts_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+};
+
+static struct aead_alg aead_algos[] = {
+ {
+ .init = artpec6_crypto_aead_init,
+ .setkey = artpec6_crypto_aead_set_key,
+ .encrypt = artpec6_crypto_aead_encrypt,
+ .decrypt = artpec6_crypto_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "artpec-gcm-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ }
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dbgfs_u32 {
+ char *name;
+ mode_t mode;
+ u32 *flag;
+ char *desc;
+};
+
+static void artpec6_crypto_init_debugfs(void)
+{
+ dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
+
+ if (!dbgfs_root || IS_ERR(dbgfs_root)) {
+ dbgfs_root = NULL;
+ pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
+ return;
+ }
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_status_read", dbgfs_root,
+ &artpec6_crypto_fail_status_read);
+
+ fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
+ &artpec6_crypto_fail_dma_array_full);
+#endif
+}
+
+static void artpec6_crypto_free_debugfs(void)
+{
+ if (!dbgfs_root)
+ return;
+
+ debugfs_remove_recursive(dbgfs_root);
+ dbgfs_root = NULL;
+}
+#endif
+
+static const struct of_device_id artpec6_crypto_of_match[] = {
+ { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
+ { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
+ {}
+};
+MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
+
+static int artpec6_crypto_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ enum artpec6_crypto_variant variant;
+ struct artpec6_crypto *ac;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct resource *res;
+ int irq;
+ int err;
+
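+	/* Only a single device instance is supported */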
+ if (artpec6_crypto_dev)
+ return -ENODEV;
+
+ match = of_match_node(artpec6_crypto_of_match, dev->of_node);
+ if (!match)
+ return -EINVAL;
+
+ variant = (enum artpec6_crypto_variant)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
+ GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ac);
+ ac->variant = variant;
+
+ spin_lock_init(&ac->queue_lock);
+ INIT_LIST_HEAD(&ac->queue);
+ INIT_LIST_HEAD(&ac->pending);
+ setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);
+
+ ac->base = base;
+
+ ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
+ sizeof(struct artpec6_crypto_dma_descriptors),
+ 64,
+ 0,
+ NULL);
+ if (!ac->dma_cache)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_init_debugfs();
+#endif
+
+ tasklet_init(&ac->task, artpec6_crypto_task,
+ (unsigned long)ac);
+
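+	/* Allocate twice the size needed so the buffers can be cache line aligned */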
+ ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->pad_buffer)
+ return -ENOMEM;
+ ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->zero_buffer)
+ return -ENOMEM;
+ ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ err = init_crypto_hw(ac);
+ if (err)
+ goto free_cache;
+
+ err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
+ "artpec6-crypto", ac);
+ if (err)
+ goto disable_hw;
+
+ artpec6_crypto_dev = &pdev->dev;
+
+ err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto disable_hw;
+ }
+
+ if (variant != ARTPEC6_CRYPTO) {
+ err = crypto_register_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto unregister_ahashes;
+ }
+ }
+
+ err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ciphers\n");
+ goto unregister_a7_ahashes;
+ }
+
+ err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+ if (err) {
+ dev_err(dev, "Failed to register aeads\n");
+ goto unregister_algs;
+ }
+
+ return 0;
+
+unregister_algs:
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+unregister_a7_ahashes:
+ if (variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+unregister_ahashes:
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+disable_hw:
+ artpec6_crypto_disable_hw(ac);
+free_cache:
+ kmem_cache_destroy(ac->dma_cache);
+ return err;
+}
+
+static int artpec6_crypto_remove(struct platform_device *pdev)
+{
+ struct artpec6_crypto *ac = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (ac->variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+
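+	/* Quiesce the IRQ, tasklet and timer before disabling the hardware */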
+ tasklet_disable(&ac->task);
+ devm_free_irq(&pdev->dev, irq, ac);
+ tasklet_kill(&ac->task);
+ del_timer_sync(&ac->timer);
+
+ artpec6_crypto_disable_hw(ac);
+
+ kmem_cache_destroy(ac->dma_cache);
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_free_debugfs();
+#endif
+ return 0;
+}
+
+static struct platform_driver artpec6_crypto_driver = {
+ .probe = artpec6_crypto_probe,
+ .remove = artpec6_crypto_remove,
+ .driver = {
+ .name = "artpec6-crypto",
+ .owner = THIS_MODULE,
+ .of_match_table = artpec6_crypto_of_match,
+ },
+};
+
+module_platform_driver(artpec6_crypto_driver);
+
+MODULE_AUTHOR("Axis Communications AB");
+MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 9cfd36c1bcb6..8685c7e4debd 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -90,8 +90,6 @@ static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
-#define MAX_SPUS 16
-
/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
* Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
* 0x60 - ring 0
@@ -120,7 +118,7 @@ static u8 select_channel(void)
{
u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
- return chan_idx % iproc_priv.spu.num_spu;
+ return chan_idx % iproc_priv.spu.num_chan;
}
/**
@@ -4528,8 +4526,13 @@ static void spu_functions_register(struct device *dev,
*/
static int spu_mb_init(struct device *dev)
{
- struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu];
- int err;
+ struct mbox_client *mcl = &iproc_priv.mcl;
+ int err, i;
+
+ iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
+ sizeof(struct mbox_chan *), GFP_KERNEL);
+ if (!iproc_priv.mbox)
+ return -ENOMEM;
mcl->dev = dev;
mcl->tx_block = false;
@@ -4538,25 +4541,33 @@ static int spu_mb_init(struct device *dev)
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
- iproc_priv.mbox[iproc_priv.spu.num_spu] =
- mbox_request_channel(mcl, 0);
- if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) {
- err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]);
- dev_err(dev,
- "Mbox channel %d request failed with err %d",
- iproc_priv.spu.num_spu, err);
- iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL;
- return err;
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
+ if (IS_ERR(iproc_priv.mbox[i])) {
+ err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ dev_err(dev,
+ "Mbox channel %d request failed with err %d",
+ i, err);
+ iproc_priv.mbox[i] = NULL;
+ goto free_channels;
+ }
}
return 0;
+free_channels:
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ if (iproc_priv.mbox[i])
+ mbox_free_channel(iproc_priv.mbox[i]);
+ }
+
+ return err;
}
static void spu_mb_release(struct platform_device *pdev)
{
int i;
- for (i = 0; i < iproc_priv.spu.num_spu; i++)
+ for (i = 0; i < iproc_priv.spu.num_chan; i++)
mbox_free_channel(iproc_priv.mbox[i]);
}
@@ -4567,7 +4578,7 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.session_count, 0);
atomic_set(&iproc_priv.stream_count, 0);
- atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu);
+ atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
atomic64_set(&iproc_priv.bytes_in, 0);
atomic64_set(&iproc_priv.bytes_out, 0);
for (i = 0; i < SPU_OP_NUM; i++) {
@@ -4809,47 +4820,38 @@ static int spu_dt_read(struct platform_device *pdev)
struct resource *spu_ctrl_regs;
const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
- void __iomem *spu_reg_vbase[MAX_SPUS];
- int err;
+ struct device_node *dn = pdev->dev.of_node;
+ int err, i;
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- matched_spu_type = match->data;
+ /* Count number of mailbox channels */
+ spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- if (iproc_priv.spu.num_spu > 1) {
- /* If this is 2nd or later SPU, make sure it's same type */
- if ((spu->spu_type != matched_spu_type->type) ||
- (spu->spu_subtype != matched_spu_type->subtype)) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Multiple SPU types not allowed");
- return err;
- }
- } else {
- /* Record type of first SPU */
- spu->spu_type = matched_spu_type->type;
- spu->spu_subtype = matched_spu_type->subtype;
+ match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
}
- /* Get and map SPU registers */
- spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!spu_ctrl_regs) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid/missing registers for SPU\n");
- return err;
- }
+ matched_spu_type = match->data;
- spu_reg_vbase[iproc_priv.spu.num_spu] =
- devm_ioremap_resource(dev, spu_ctrl_regs);
- if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) {
- err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
- err);
- spu_reg_vbase[iproc_priv.spu.num_spu] = NULL;
- return err;
- }
+ spu->spu_type = matched_spu_type->type;
+ spu->spu_subtype = matched_spu_type->subtype;
- dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu);
+ for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
+ platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
- spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase;
+ spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
+ if (IS_ERR(spu->reg_vbase[i])) {
+ err = PTR_ERR(spu->reg_vbase[i]);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ err);
+ spu->reg_vbase[i] = NULL;
+ return err;
+ }
+ }
+ spu->num_spu = i;
+ dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
return 0;
}
@@ -4860,8 +4862,8 @@ int bcm_spu_probe(struct platform_device *pdev)
struct spu_hw *spu = &iproc_priv.spu;
int err = 0;
- iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev;
- platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu],
+ iproc_priv.pdev = pdev;
+ platform_set_drvdata(iproc_priv.pdev,
&iproc_priv);
err = spu_dt_read(pdev);
@@ -4872,12 +4874,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- iproc_priv.spu.num_spu++;
-
- /* If already initialized, we've just added another SPU and are done */
- if (iproc_priv.inited)
- return 0;
-
if (spu->spu_type == SPU_TYPE_SPUM)
iproc_priv.bcm_hdr_len = 8;
else if (spu->spu_type == SPU_TYPE_SPU2)
@@ -4893,8 +4889,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto fail_reg;
- iproc_priv.inited = true;
-
return 0;
fail_reg:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 51dca529ce8f..57a55eb2a255 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -427,10 +427,13 @@ struct spu_hw {
/* The number of SPUs on this platform */
u32 num_spu;
+
+ /* The number of SPU channels on this platform */
+ u32 num_chan;
};
struct device_private {
- struct platform_device *pdev[MAX_SPUS];
+ struct platform_device *pdev;
struct spu_hw spu;
@@ -470,12 +473,10 @@ struct device_private {
/* Number of ICV check failures for AEAD messages */
atomic_t bad_icv;
- struct mbox_client mcl[MAX_SPUS];
- /* Array of mailbox channel pointers, one for each channel */
- struct mbox_chan *mbox[MAX_SPUS];
+ struct mbox_client mcl;
- /* Driver initialized */
- bool inited;
+ /* Array of mailbox channel pointers, one for each channel */
+ struct mbox_chan **mbox;
};
extern struct device_private iproc_priv;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0488b7f81dcf..54f3b375a453 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -81,40 +81,6 @@
#define debug(format, arg...)
#endif
-#ifdef DEBUG
-#include <linux/highmem.h>
-
-static void dbg_dump_sg(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii)
-{
- struct scatterlist *it;
- void *it_page;
- size_t len;
- void *buf;
-
- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
- /*
- * make sure the scatterlist's page
- * has a valid virtual memory mapping
- */
- it_page = kmap_atomic(sg_page(it));
- if (unlikely(!it_page)) {
- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
- return;
- }
-
- buf = it_page + it->offset;
- len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
- tlen -= len;
-
- kunmap_atomic(it_page);
- }
-}
-#endif
-
static struct list_head alg_list;
struct caam_alg_entry {
@@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
ivsize, 1);
pr_err("asked=%d, nbytes%d\n",
(int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
-#ifdef DEBUG
- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1);
-#endif
+ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 6f9c7ec0e339..530c14ee32de 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -599,7 +599,7 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
+ JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
@@ -688,8 +688,7 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 78c4c0485c58..2eefc4a26bc2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -12,7 +12,6 @@
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
-#include "sg_sw_sec4.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
@@ -399,6 +398,7 @@ badkey:
* @iv_dma: dma address of iv for checking continuity and link table
* @qm_sg_bytes: length of dma mapped h/w link table
* @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
* @assoclen_dma: bus physical mapped address of req->assoclen
* @drv_req: driver-specific request structure
* @sgt: the h/w link table
@@ -409,8 +409,12 @@ struct aead_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -431,6 +435,9 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_ABLKCIPHER_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -660,6 +667,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
*/
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
@@ -670,7 +685,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->drv_req.cbk = aead_done;
edesc->drv_req.drv_ctx = drv_ctx;
- edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
@@ -776,9 +792,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *qidev = caam_ctx->qidev;
-#ifdef DEBUG
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif
@@ -791,14 +807,21 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(qidev, edesc, req);
qi_cache_free(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
ablkcipher_request_complete(req, status);
}
@@ -880,6 +903,15 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
dst_sg_idx = qm_sg_ents;
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
@@ -892,7 +924,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sg_table = &edesc->sgt[0];
edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
edesc->drv_req.app_ctx = req;
@@ -1026,6 +1057,14 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
qm_sg_ents += 1 + mapped_dst_nents;
}
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (!edesc) {
@@ -1968,7 +2007,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi",
.cra_blocksize = DES_BLOCK_SIZE,
},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 910ec61cae09..698580b60b2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -791,8 +791,8 @@ static int ahash_update_ctx(struct ahash_request *req)
to_hash - *buflen,
*next_buflen, 0);
} else {
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+ 1);
}
desc = edesc->hw_desc;
@@ -882,8 +882,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 41398da3edf4..fde07d4ff019 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -285,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
if (err)
return err;
- err = caam_init_buf(ctx, 1);
- if (err)
- return err;
-
- return 0;
+ return caam_init_buf(ctx, 1);
}
static struct hwrng caam_rng = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dd353e342c12..dacb53fb690e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -17,6 +17,8 @@
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -319,8 +321,11 @@ static int caam_remove(struct platform_device *pdev)
caam_qi_shutdown(ctrlpriv->qidev);
#endif
- /* De-initialize RNG state handles initialized by this driver. */
- if (ctrlpriv->rng4_sh_init)
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
+ */
+ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -444,7 +449,6 @@ static int caam_probe(struct platform_device *pdev)
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
- ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
/* Enable clocking */
@@ -553,12 +557,17 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * long pointers in master configuration register
+ * long pointers in master configuration register.
+ * In case of DPAA 2.x, Management Complex firmware performs
+ * the configuration.
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ if (!caam_dpaa2)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
@@ -587,7 +596,9 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ if (caam_dpaa2)
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
@@ -630,11 +641,9 @@ static int caam_probe(struct platform_device *pdev)
ring++;
}
- /* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present =
- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
- CTPR_MS_QI_MASK);
- if (ctrlpriv->qi_present) {
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
@@ -662,8 +671,10 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
*/
- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!caam_dpaa2 &&
+ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -731,63 +742,43 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
#ifdef CONFIG_DEBUG_FS
-
- ctrlpriv->ctl_rq_dequeued =
- debugfs_create_file("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_req =
- debugfs_create_file("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_req =
- debugfs_create_file("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_file("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_file("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_file("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_file("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
- ctrlpriv->ctl_faultaddr =
- debugfs_create_file("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultdetail =
- debugfs_create_file("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultstatus =
- debugfs_create_file("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
+ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index cac5402a46eb..7e7bf68c9ef5 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -10,4 +10,6 @@
/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(void);
+extern bool caam_dpaa2;
+
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6f44ccb55c63..3d639f3b45aa 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -9,6 +9,46 @@
#include "desc.h"
#include "error.h"
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+	for (it = sg; it && tlen > 0; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ pr_err("caam_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min_t(size_t, tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+#else
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{}
+#endif /* DEBUG */
+EXPORT_SYMBOL(caam_dump_sg);
+
static const struct {
u8 value;
const char *error_text;
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index b6350b0d9153..250e1a21c473 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,4 +8,8 @@
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii);
#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 85b6c5835b8f..a52361258d3a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -64,12 +64,9 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-
- struct device *dev;
#ifdef CONFIG_CAAM_QI
struct device *qidev;
#endif
- struct platform_device *pdev;
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
@@ -105,16 +102,8 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
- struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
- struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
- struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
- struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
-
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
-#ifdef CONFIG_CAAM_QI
- struct dentry *qi_congested;
-#endif
#endif
};
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1ccfb317d468..d258953ff488 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_address.h>
#include "compat.h"
+#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
@@ -499,7 +500,11 @@ static int caam_jr_probe(struct platform_device *pdev)
jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+ if (caam_dpaa2)
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop,
+ "fsl,sec-v5.0-job-ring"))
error = dma_set_mask_and_coherent(jrdev,
DMA_BIT_MASK(40));
else
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 1990ed460c46..e4cf00014233 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -24,9 +24,6 @@
*/
#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
-/* Length of a single buffer in the QI driver memory cache */
-#define CAAM_QI_MEMCACHE_SIZE 512
-
#define CAAM_QI_ENQUEUE_RETRIES 10000
#define CAAM_NAPI_WEIGHT 63
@@ -55,6 +52,7 @@ struct caam_qi_pcpu_priv {
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
/*
* caam_qi_priv - CAAM QI backend private params
@@ -203,8 +201,8 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev,
goto init_req_fq_fail;
}
- dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
- smp_processor_id());
+ dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+ smp_processor_id());
return req_fq;
init_req_fq_fail:
@@ -277,6 +275,7 @@ empty_fq:
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
qman_destroy_fq(fq);
+ kfree(fq);
return ret;
}
@@ -342,8 +341,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
return ret;
}
@@ -373,10 +371,9 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
} else if (kill_fq(qidev, old_fq)) {
- dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
+ dev_warn(qidev, "Old CAAM FQ kill failed\n");
}
return 0;
@@ -392,7 +389,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
dma_addr_t hwdesc;
struct caam_drv_ctx *drv_ctx;
const cpumask_t *cpus = qman_affine_cpus();
- static DEFINE_PER_CPU(int, last_cpu);
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
@@ -511,7 +507,6 @@ int caam_qi_shutdown(struct device *qidev)
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
- kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
/*
@@ -646,7 +641,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
- dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
+ dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
return 0;
}
@@ -679,7 +674,7 @@ static int init_cgr(struct device *qidev)
return ret;
}
- dev_info(qidev, "Congestion threshold set to %llu\n", val);
+ dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
return 0;
}
@@ -737,6 +732,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
qi_pdev = platform_device_register_full(&qi_pdev_info);
if (IS_ERR(qi_pdev))
return PTR_ERR(qi_pdev);
+ set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = &qi_pdev->dev;
@@ -795,10 +791,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
/* Done with the CGRs; restore the cpus allowed mask */
set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
- ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
- ctrlpriv->ctl,
- &times_congested,
- &caam_fops_u64_ro);
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
#endif
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 33b0433f5f22..ecb21f207637 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -39,6 +39,9 @@
*/
#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 768
+
extern bool caam_congested __read_mostly;
/*
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 84d2f838a063..2b5efff9ec3c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -293,6 +293,7 @@ struct caam_perfmon {
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
#define CTPR_MS_PG_SZ_MASK 0x10
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 000000000000..31b440757146
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ dpaa2_sg_set_addr(qm_sg_ptr, dma);
+ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+ dpaa2_sg_set_final(qm_sg_ptr, false);
+ dpaa2_sg_set_len(qm_sg_ptr, len);
+ dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+ dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns the last entry
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr,
+ u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index c6adad09c972..936b1b630058 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,7 +5,13 @@
*
*/
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
#include "regs.h"
+#include "sg_sw_qm2.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
struct sec4_sg_entry {
u64 ptr;
@@ -19,9 +25,15 @@ struct sec4_sg_entry {
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
- sec4_sg_ptr->len = cpu_to_caam32(len);
- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
+ if (caam_dpaa2) {
+ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+ offset);
+ } else {
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+ SEC4_SG_OFFSET_MASK);
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -47,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
return sec4_sg_ptr - 1;
}
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+ if (caam_dpaa2)
+ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+ else
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
/*
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
@@ -56,20 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(sec4_sg_ptr);
}
-static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
- struct scatterlist *sg, unsigned int total,
- struct sec4_sg_entry *sec4_sg_ptr)
-{
- do {
- unsigned int len = min(sg_dma_len(sg), total);
-
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
- sec4_sg_ptr++;
- sg = sg_next(sg);
- total -= len;
- } while (total);
- return sec4_sg_ptr - 1;
-}
+#endif /* _SG_SW_SEC4_H_ */
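
Because the helpers now test caam_dpaa2 internally, existing sec4 call sites serve both hardware generations without modification. A hedged sketch of what a call site continues to look like (the function name is illustrative):

/* Illustrative call site: format selection happens inside the helper. */
static void fill_link_table(struct scatterlist *src, int src_nents,
			    struct sec4_sg_entry *sec4_sg)
{
	/* Picks the DPAA2 or legacy SEC4 layout via the caam_dpaa2 flag */
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}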
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 4119c40e7c4b..34a6d8bf229e 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -268,8 +268,10 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size)
- return -EINVAL;
+ if (!mcode->code_size) {
+ ret = -EINVAL;
+ goto fw_release;
+ }
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
@@ -280,7 +282,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
&mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fw_release;
}
memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
@@ -302,12 +305,14 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
- return ret;
+ goto fw_release;
}
dev_info(dev, "Microcode Loaded %s\n", mcode->version);
mcode->is_mc_valid = 1;
cpt->next_mc_idx++;
+
+fw_release:
release_firmware(fw_entry);
return ret;
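
The conversion above replaces early returns with jumps to a single fw_release label, so the firmware reference obtained earlier is dropped on every path. The idiom, reduced to a self-contained sketch:

/* Sketch of the single-exit cleanup pattern applied in cpt_ucode_load_fw(). */
static int load_with_cleanup(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = -EINVAL;
	if (!fw->size)
		goto out;		/* every later failure lands here */

	ret = 0;			/* ... consume fw->data ... */
out:
	release_firmware(fw);		/* runs on success and failure alike */
	return ret;
}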
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 9ccefb9b7232..fee7cb2ce747 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -513,8 +513,10 @@ static int nitrox_probe(struct pci_dev *pdev,
pci_set_master(pdev);
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
- if (!ndev)
+ if (!ndev) {
+ err = -ENOMEM;
goto ndev_fail;
+ }
pci_set_drvdata(pdev, ndev);
ndev->pdev = pdev;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 2238f77aa248..6d626606b9c5 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,25 +1,33 @@
config CRYPTO_DEV_CCP_DD
- tristate "Cryptographic Coprocessor device driver"
- depends on CRYPTO_DEV_CCP
+ tristate "Secure Processor device driver"
default m
+ help
+ Provides the AMD Secure Processor device driver.
+ If you choose 'M' here, this module will be called ccp.
+
+config CRYPTO_DEV_SP_CCP
+ bool "Cryptographic Coprocessor device"
+ default y
+ depends on CRYPTO_DEV_CCP_DD
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
- Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to offload encryption operations such as SHA,
- AES and more. If you choose 'M' here, this module will be called
- ccp.
+ Provides support for the AMD Cryptographic Coprocessor (CCP) device,
+ which can be used to offload encryption operations such as SHA, AES
+ and more.
config CRYPTO_DEV_CCP_CRYPTO
tristate "Encryption and hashing offload support"
- depends on CRYPTO_DEV_CCP_DD
default m
+ depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
+ select CRYPTO_RSA
help
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports offload of SHA and AES algorithms.
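
With the split, the SP container driver can be built without CCP support, while the crypto API layer requires both options. A plausible .config fragment under the options introduced above:

CONFIG_CRYPTO_DEV_CCP_DD=m
CONFIG_CRYPTO_DEV_SP_CCP=y
CONFIG_CRYPTO_DEV_CCP_CRYPTO=m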
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 59493fd3a751..57f8debfcfb3 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,12 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o \
+ccp-objs := sp-dev.o sp-platform.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-ops.o \
ccp-dev-v3.o \
ccp-dev-v5.o \
- ccp-platform.o \
ccp-dmaengine.o \
ccp-debugfs.o
-ccp-$(CONFIG_PCI) += ccp-pci.o
+ccp-$(CONFIG_PCI) += sp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
@@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes-xts.o \
ccp-crypto-aes-galois.o \
ccp-crypto-des3.o \
+ ccp-crypto-rsa.o \
ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 38ee6f348ea9..52313524a4dd 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 58a4244b4752..94b5bcf5b628 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,8 +1,9 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
+ * Author: Gary R Hook <gary.hook@amd.com>
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -15,6 +16,7 @@
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -37,46 +39,26 @@ struct ccp_unit_size_map {
u32 value;
};
-static struct ccp_unit_size_map unit_size_map[] = {
+static struct ccp_unit_size_map xts_unit_sizes[] = {
{
- .size = 4096,
- .value = CCP_XTS_AES_UNIT_SIZE_4096,
- },
- {
- .size = 2048,
- .value = CCP_XTS_AES_UNIT_SIZE_2048,
- },
- {
- .size = 1024,
- .value = CCP_XTS_AES_UNIT_SIZE_1024,
+ .size = 16,
+ .value = CCP_XTS_AES_UNIT_SIZE_16,
},
{
- .size = 512,
+ .size = 512,
.value = CCP_XTS_AES_UNIT_SIZE_512,
},
{
- .size = 256,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 128,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 64,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 32,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 1024,
+ .value = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
- .size = 16,
- .value = CCP_XTS_AES_UNIT_SIZE_16,
+ .size = 2048,
+ .value = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
- .size = 1,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 4096,
+ .value = CCP_XTS_AES_UNIT_SIZE_4096,
},
};
@@ -96,15 +78,26 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ unsigned int ccpversion = ccp_version();
+ int ret;
- /* Only support 128-bit AES key with a 128-bit Tweak key,
- * otherwise use the fallback
+ ret = xts_check_key(xfm, key, key_len);
+ if (ret)
+ return ret;
+
+ /* Version 3 devices support 128-bit keys; version 5 devices can
+ * accommodate 128- and 256-bit keys.
*/
switch (key_len) {
case AES_KEYSIZE_128 * 2:
memcpy(ctx->u.aes.key, key, key_len);
break;
+ case AES_KEYSIZE_256 * 2:
+ if (ccpversion > CCP_VERSION(3, 0))
+ memcpy(ctx->u.aes.key, key, key_len);
+ break;
}
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
@@ -117,6 +110,8 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ unsigned int ccpversion = ccp_version();
+ unsigned int fallback = 0;
unsigned int unit;
u32 unit_size;
int ret;
@@ -130,18 +125,32 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
+ /* Check conditions under which the CCP can fulfill a request. The
+ * device can handle input plaintext of a length that is a multiple
+ * of the unit_size, but the crypto implementation only supports
+ * the unit_size being equal to the input length. This limits the
+ * number of scenarios we can handle.
+ */
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
- if (req->nbytes <= unit_size_map[0].size) {
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
- if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
- unit_size = unit_size_map[unit].value;
- break;
- }
+ for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
+ if (req->nbytes == xts_unit_sizes[unit].size) {
+ unit_size = unit;
+ break;
}
}
-
- if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
- (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ /* The CCP has restrictions on block sizes. Also, a version 3 device
+ * only supports AES-128 operations; version 5 CCPs support both
+ * AES-128 and -256 operations.
+ */
+ if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
+ fallback = 1;
+ if ((ccpversion < CCP_VERSION(5, 0)) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_128))
+ fallback = 1;
+ if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_256))
+ fallback = 1;
+ if (fallback) {
SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
@@ -164,6 +173,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size;
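
The rewritten lookup accepts a request only when its length exactly equals one of the supported unit sizes; any other length, and any key size the device cannot handle, is routed to the software fallback. The size check restated as a self-contained sketch:

/* Illustrative restatement of the exact-match test performed above. */
static const unsigned int xts_sizes[] = { 16, 512, 1024, 2048, 4096 };

static bool xts_len_supported(unsigned int nbytes)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xts_sizes); i++)
		if (nbytes == xts_sizes[i])
			return true;

	return false;	/* caller falls back to the skcipher */
}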
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 5af7347ae03c..ae87b741f9d5 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <ghook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8dccbddabef1..35a9de7fd475 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -17,6 +17,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/akcipher.h>
#include "ccp-crypto.h"
@@ -37,10 +38,15 @@ static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);
/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@@ -358,6 +364,12 @@ static int ccp_register_algs(void)
return ret;
}
+ if (!rsa_disable) {
+ ret = ccp_register_rsa_algs(&akcipher_algs);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -366,6 +378,7 @@ static void ccp_unregister_algs(void)
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
+ struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@@ -384,6 +397,12 @@ static void ccp_unregister_algs(void)
list_del(&aead_alg->entry);
kfree(aead_alg);
}
+
+ list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+ crypto_unregister_akcipher(&akc_alg->alg);
+ list_del(&akc_alg->entry);
+ kfree(akc_alg);
+ }
}
static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 000000000000..e6db8672d89c
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,299 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
+ const u8 *buf, size_t sz)
+{
+ int nskip;
+
+ for (nskip = 0; nskip < sz; nskip++)
+ if (buf[nskip])
+ break;
+ *kplen = sz - nskip;
+ *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ if (!*kpbuf)
+ return -ENOMEM;
+ memcpy(*kpbuf, buf + nskip, *kplen);
+
+ return 0;
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct akcipher_request *req = akcipher_request_cast(async_req);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+ return 0;
+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+ if (ccp_version() > CCP_VERSION(3, 0))
+ return CCP5_RSA_MAXMOD;
+ else
+ return CCP_RSA_MAXMOD;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ int ret = 0;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_RSA;
+
+ rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
+ if (encrypt) {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+ } else {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
+ }
+ rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+ rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+ rctx->cmd.u.rsa.src = req->src;
+ rctx->cmd.u.rsa.src_len = req->src_len;
+ rctx->cmd.u.rsa.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, false);
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+ /* In bits */
+ if (len < 8 || len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+ /* Clean up old key data */
+ kzfree(ctx->u.rsa.e_buf);
+ ctx->u.rsa.e_buf = NULL;
+ ctx->u.rsa.e_len = 0;
+ kzfree(ctx->u.rsa.n_buf);
+ ctx->u.rsa.n_buf = NULL;
+ ctx->u.rsa.n_len = 0;
+ kzfree(ctx->u.rsa.d_buf);
+ ctx->u.rsa.d_buf = NULL;
+ ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key;
+ int ret;
+
+ ccp_rsa_free_key_bufs(ctx);
+ memset(&raw_key, 0, sizeof(raw_key));
+
+ /* Code borrowed from crypto/rsa.c */
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ goto n_key;
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
+ raw_key.n, raw_key.n_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
+
+ ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
+ if (ccp_check_key_length(ctx->u.rsa.key_len)) {
+ ret = -EINVAL;
+ goto key_err;
+ }
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
+ raw_key.e, raw_key.e_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
+
+ if (private) {
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
+ &ctx->u.rsa.d_len,
+ raw_key.d, raw_key.d_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.d_sg,
+ ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
+ }
+
+ return 0;
+
+key_err:
+ ccp_rsa_free_key_bufs(ctx);
+
+n_key:
+ return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+ ctx->complete = ccp_rsa_complete;
+
+ return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg ccp_rsa_defaults = {
+ .encrypt = ccp_rsa_encrypt,
+ .decrypt = ccp_rsa_decrypt,
+ .sign = ccp_rsa_decrypt,
+ .verify = ccp_rsa_encrypt,
+ .set_pub_key = ccp_rsa_setpubkey,
+ .set_priv_key = ccp_rsa_setprivkey,
+ .max_size = ccp_rsa_maxsize,
+ .init = ccp_rsa_init_tfm,
+ .exit = ccp_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-ccp",
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+ },
+};
+
+struct ccp_rsa_def {
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int reqsize;
+ struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_rsa_def rsa_algs[] = {
+ {
+ .version = CCP_VERSION(3, 0),
+ .name = "rsa",
+ .driver_name = "rsa-ccp",
+ .reqsize = sizeof(struct ccp_rsa_req_ctx),
+ .alg_defaults = &ccp_rsa_defaults,
+ }
+};
+
+int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+{
+ struct ccp_crypto_akcipher_alg *ccp_alg;
+ struct akcipher_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ ret = crypto_register_akcipher(alg);
+ if (ret) {
+ pr_err("%s akcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_rsa_algs(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ /* Register the RSA algorithm in standard mode.
+ * This works for CCP v3 and later.
+ */
+ for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
+ if (rsa_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
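
Once registered, the driver is reached through the generic akcipher API rather than being called directly. A hedged sketch of a consumer (the DER-encoded key and its length are placeholders):

/* Illustrative consumer of the "rsa" akcipher; error handling abbreviated. */
static int try_rsa_ccp(const void *der_pub_key, unsigned int keylen)
{
	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);	/* may bind rsa-ccp */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, keylen);

	crypto_free_akcipher(tfm);
	return ret;
}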
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index ce97b3868f4a..8b9b16d433f7 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index dd5bf15f06e5..b9fd090c46c2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -24,6 +24,8 @@
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/rsa.h>
#define CCP_LOG_LEVEL KERN_INFO
@@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg {
struct ahash_alg alg;
};
+struct ccp_crypto_akcipher_alg {
+ struct list_head entry;
+
+ struct akcipher_alg alg;
+};
+
static inline struct ccp_crypto_ablkcipher_alg *
ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
@@ -91,7 +99,7 @@ struct ccp_aes_ctx {
struct scatterlist key_sg;
unsigned int key_len;
- u8 key[AES_MAX_KEY_SIZE];
+ u8 key[AES_MAX_KEY_SIZE * 2];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -227,12 +235,35 @@ struct ccp_sha_exp_ctx {
u8 buf[MAX_SHA_BLOCK_SIZE];
};
+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+ unsigned int key_len; /* in bits */
+ struct scatterlist e_sg;
+ u8 *e_buf;
+ unsigned int e_len;
+ struct scatterlist n_sg;
+ u8 *n_buf;
+ unsigned int n_len;
+ struct scatterlist d_sg;
+ u8 *d_buf;
+ unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+ struct ccp_cmd cmd;
+};
+
+#define CCP_RSA_MAXMOD (4 * 1024 / 8)
+#define CCP5_RSA_MAXMOD (16 * 1024 / 8)
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
union {
struct ccp_aes_ctx aes;
+ struct ccp_rsa_ctx rsa;
struct ccp_sha_ctx sha;
struct ccp_des3_ctx des3;
} u;
@@ -249,5 +280,6 @@ int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
int ccp_register_des3_algs(struct list_head *head);
+int ccp_register_rsa_algs(struct list_head *head);
#endif
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 3cd6c83754e0..59d4ca4e72d8 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
if (!ccp->debugfs_instance)
- return;
+ goto err;
debugfs_info = debugfs_create_file("info", 0400,
ccp->debugfs_instance, ccp,
&ccp_debugfs_info_ops);
if (!debugfs_info)
- return;
+ goto err;
debugfs_stats = debugfs_create_file("stats", 0600,
ccp->debugfs_instance, ccp,
&ccp_debugfs_stats_ops);
if (!debugfs_stats)
- return;
+ goto err;
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -327,15 +327,20 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
if (!debugfs_q_instance)
- return;
+ goto err;
debugfs_q_stats =
debugfs_create_file("stats", 0600,
debugfs_q_instance, cmd_q,
&ccp_debugfs_queue_ops);
if (!debugfs_q_stats)
- return;
+ goto err;
}
+
+ return;
+
+err:
+ debugfs_remove_recursive(ccp->debugfs_instance);
}
void ccp5_debugfs_destroy(void)
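
Rather than returning silently when any debugfs node fails to appear, the error label now tears down the entire instance directory, including whatever was created before the failure. The unwind pattern in isolation:

/* Sketch: one debugfs_remove_recursive() call undoes partial setup. */
static void setup_debug(struct dentry *parent, void *data,
			const struct file_operations *fops)
{
	struct dentry *dir = debugfs_create_dir("example", parent);

	if (!dir)
		return;

	if (!debugfs_create_file("info", 0400, dir, data, fops))
		debugfs_remove_recursive(dir);	/* removes dir and children */
}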
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e30656f..240bebbcb8ac 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data)
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp_disable_queue_interrupts(ccp);
if (ccp->use_tasklet)
@@ -454,7 +453,7 @@ static int ccp_init(struct ccp_device *ccp)
iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -511,7 +510,7 @@ e_kthread:
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -550,7 +549,7 @@ static void ccp_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
dma_pool_destroy(ccp->cmd_q[i].dma_pool);
@@ -586,10 +585,17 @@ static const struct ccp_actions ccp3_actions = {
.irqhandler = ccp_irq_handler,
};
+const struct ccp_vdata ccpv3_platform = {
+ .version = CCP_VERSION(3, 0),
+ .setup = NULL,
+ .perform = &ccp3_actions,
+ .offset = 0,
+};
+
const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
.perform = &ccp3_actions,
- .bar = 2,
.offset = 0x20000,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b10d2d2075cb..65604fc65e8f 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
@@ -145,6 +145,7 @@ union ccp_function {
#define CCP_AES_MODE(p) ((p)->aes.mode)
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p) ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
@@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
+ CCP_XTS_TYPE(&function) = op->u.xts.type;
CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
@@ -469,7 +471,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
+ CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -484,10 +486,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Exponent is in LSB memory */
- CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
- CCP5_CMD_KEY_HI(&desc) = 0;
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ /* Key (Exponent) is in external memory */
+ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+ CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -769,8 +771,7 @@ static void ccp5_irq_bh(unsigned long data)
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp5_disable_queue_interrupts(ccp);
ccp->total_interrupts++;
@@ -881,7 +882,7 @@ static int ccp5_init(struct ccp_device *ccp)
dev_dbg(dev, "Requesting an IRQ...\n");
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -987,7 +988,7 @@ e_kthread:
kthread_stop(ccp->cmd_q[i].kthread);
e_irq:
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -1037,7 +1038,7 @@ static void ccp5_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -1106,15 +1107,14 @@ static const struct ccp_actions ccp5_actions = {
.init = ccp5_init,
.destroy = ccp5_destroy,
.get_free_slots = ccp5_get_free_slots,
- .irqhandler = ccp5_irq_handler,
};
const struct ccp_vdata ccpv5a = {
.version = CCP_VERSION(5, 0),
.setup = ccp5_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv5b = {
@@ -1122,6 +1122,6 @@ const struct ccp_vdata ccpv5b = {
.dma_chan_attr = DMA_PRIVATE,
.setup = ccp5other_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
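
The CCP_RSA_SIZE fix rounds the modulus width up to whole bytes instead of truncating: a 2049-bit modulus needs 257 bytes, where the old mod_size >> 3 would have reported 256. As a one-line sketch:

/* Round a bit count up to bytes: 2048 -> 256, 2049 -> 257. */
static inline unsigned int rsa_bits_to_bytes(unsigned int bits)
{
	return (bits + 7) >> 3;
}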
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2506b5025700..4e029b176641 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
@@ -30,12 +29,6 @@
#include "ccp-dev.h"
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1.0");
-MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
-
struct ccp_tasklet_data {
struct completion completion;
struct ccp_cmd *cmd;
@@ -111,13 +104,6 @@ static LIST_HEAD(ccp_units);
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
-/* Ever-increasing value to produce unique unit numbers */
-static atomic_t ccp_unit_ordinal;
-static unsigned int ccp_increment_unit_ordinal(void)
-{
- return atomic_inc_return(&ccp_unit_ordinal);
-}
-
/**
* ccp_add_device - add a CCP device to the list
*
@@ -415,6 +401,7 @@ static void ccp_do_cmd_complete(unsigned long data)
struct ccp_cmd *cmd = tdata->cmd;
cmd->callback(cmd->data, cmd->ret);
+
complete(&tdata->completion);
}
@@ -464,14 +451,17 @@ int ccp_cmd_queue_thread(void *data)
*
* @dev: device struct of the CCP
*/
-struct ccp_device *ccp_alloc_struct(struct device *dev)
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
+ struct device *dev = sp->dev;
struct ccp_device *ccp;
ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
if (!ccp)
return NULL;
ccp->dev = dev;
+ ccp->sp = sp;
+ ccp->axcache = sp->axcache;
INIT_LIST_HEAD(&ccp->cmd);
INIT_LIST_HEAD(&ccp->backlog);
@@ -486,9 +476,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
init_waitqueue_head(&ccp->sb_queue);
init_waitqueue_head(&ccp->suspend_queue);
- ccp->ord = ccp_increment_unit_ordinal();
- snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
- snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
return ccp;
}
@@ -538,55 +527,100 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-#endif
-static int __init ccp_mod_init(void)
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
-#ifdef CONFIG_X86
- int ret;
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
- ret = ccp_pci_init();
- if (ret)
- return ret;
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_pci_exit();
- return -ENODEV;
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+int ccp_dev_resume(struct sp_device *sp)
+{
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
}
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
return 0;
+}
#endif
-#ifdef CONFIG_ARM64
+int ccp_dev_init(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct ccp_device *ccp;
int ret;
- ret = ccp_platform_init();
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(sp);
+ if (!ccp)
+ goto e_err;
+ sp->ccp_data = ccp;
+
+ ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ccp->use_tasklet = sp->use_tasklet;
+
+ ccp->io_regs = sp->io_map + ccp->vdata->offset;
+ if (ccp->vdata->setup)
+ ccp->vdata->setup(ccp);
+
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
- return ret;
+ goto e_err;
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_platform_exit();
- return -ENODEV;
- }
+ dev_notice(dev, "ccp enabled\n");
return 0;
-#endif
- return -ENODEV;
+e_err:
+ sp->ccp_data = NULL;
+
+ dev_notice(dev, "ccp initialization failed\n");
+
+ return ret;
}
-static void __exit ccp_mod_exit(void)
+void ccp_dev_destroy(struct sp_device *sp)
{
-#ifdef CONFIG_X86
- ccp_pci_exit();
-#endif
+ struct ccp_device *ccp = sp->ccp_data;
-#ifdef CONFIG_ARM64
- ccp_platform_exit();
-#endif
-}
+ if (!ccp)
+ return;
-module_init(ccp_mod_init);
-module_exit(ccp_mod_exit);
+ ccp->vdata->perform->destroy(ccp);
+}
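
With the module boilerplate gone, ccp_dev_init()/ccp_dev_destroy() act as sub-device hooks driven by the new sp layer. The sp-dev core is not shown in this hunk; a hedged sketch of how it plausibly dispatches (the function body is an assumption):

/* Assumption: the sp-dev core gates the CCP hooks on the vdata entry. */
static int sp_init_subdevices(struct sp_device *sp)
{
	if (sp->dev_vdata->ccp_vdata)
		return ccp_dev_init(sp);	/* CCP present on this SP */

	return 0;				/* SP without a CCP */
}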
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a70154ac7405..6810b65c1939 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -27,6 +27,8 @@
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>
+#include "sp-dev.h"
+
#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -192,6 +194,7 @@
#define CCP_AES_CTX_SB_COUNT 1
#define CCP_XTS_AES_KEY_SB_COUNT 1
+#define CCP5_XTS_AES_KEY_SB_COUNT 2
#define CCP_XTS_AES_CTX_SB_COUNT 1
#define CCP_DES3_KEY_SB_COUNT 1
@@ -200,6 +203,7 @@
#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
+#define CCP5_RSA_MAX_WIDTH 16384
#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
@@ -344,12 +348,11 @@ struct ccp_device {
char rngname[MAX_CCP_NAME_LEN];
struct device *dev;
+ struct sp_device *sp;
/* Bus specific device information
*/
void *dev_specific;
- int (*get_irq)(struct ccp_device *ccp);
- void (*free_irq)(struct ccp_device *ccp);
unsigned int qim;
unsigned int irq;
bool use_tasklet;
@@ -362,7 +365,6 @@ struct ccp_device {
* them.
*/
struct mutex req_mutex ____cacheline_aligned;
- void __iomem *io_map;
void __iomem *io_regs;
/* Master lists that all cmds are queued on. Because there can be
@@ -497,6 +499,7 @@ struct ccp_aes_op {
};
struct ccp_xts_aes_op {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
};
@@ -626,18 +629,12 @@ struct ccp5_desc {
struct dword7 dw7;
};
-int ccp_pci_init(void);
-void ccp_pci_exit(void);
-
-int ccp_platform_init(void);
-void ccp_platform_exit(void);
-
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
extern void ccp_log_error(struct ccp_device *, int);
-struct ccp_device *ccp_alloc_struct(struct device *dev);
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
int ccp_cmd_queue_thread(void *data);
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
@@ -669,16 +666,7 @@ struct ccp_actions {
irqreturn_t (*irqhandler)(int, void *);
};
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
- const unsigned int version;
- const unsigned int dma_chan_attr;
- void (*setup)(struct ccp_device *);
- const struct ccp_actions *perform;
- const unsigned int bar;
- const unsigned int offset;
-};
-
+extern const struct ccp_vdata ccpv3_platform;
extern const struct ccp_vdata ccpv3;
extern const struct ccp_vdata ccpv5a;
extern const struct ccp_vdata ccpv5b;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e00be01fbf5a..d608043c0280 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
return &desc->tx_desc;
}
-static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
- struct dma_chan *dma_chan, struct scatterlist *dst_sg,
- unsigned int dst_nents, struct scatterlist *src_sg,
- unsigned int src_nents, unsigned long flags)
-{
- struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
- dma_chan);
- struct ccp_dma_desc *desc;
-
- dev_dbg(chan->ccp->dev,
- "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
- __func__, src_sg, src_nents, dst_sg, dst_nents, flags);
-
- desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
- flags);
- if (!desc)
- return NULL;
-
- return &desc->tx_desc;
-}
-
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
struct dma_chan *dma_chan, unsigned long flags)
{
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_dev->directions = DMA_MEM_TO_MEM;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_cap_set(DMA_SG, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_dev->device_free_chan_resources = ccp_free_chan_resources;
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
- dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
dma_dev->device_issue_pending = ccp_issue_pending;
dma_dev->device_tx_status = ccp_tx_status;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c0dfdacbdff5..406b95329b3d 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -168,7 +168,7 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (!wa->dma.address)
+ if (dma_mapping_error(wa->dev, wa->dma.address))
return -ENOMEM;
wa->dma.length = len;
@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
unsigned int unit_size, dm_offset;
bool in_place = false;
+ unsigned int sb_count;
+ enum ccp_aes_type aestype;
int ret;
switch (xts->unit_size) {
@@ -1061,7 +1063,11 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- if (xts->key_len != AES_KEYSIZE_128)
+ if (xts->key_len == AES_KEYSIZE_128)
+ aestype = CCP_AES_TYPE_128;
+ else if (xts->key_len == AES_KEYSIZE_256)
+ aestype = CCP_AES_TYPE_256;
+ else
return -EINVAL;
if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
@@ -1083,23 +1089,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
op.sb_key = cmd_q->sb_key;
op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
+ op.u.xts.type = aestype;
op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
- /* All supported key sizes fit in a single (32-byte) SB entry
- * and must be in little endian format. Use the 256-bit byte
- * swap passthru option to convert from big endian to little
- * endian.
+ /* A version 3 device only supports 128-bit keys, which fit into a
+ * single SB entry. A version 5 device uses a 512-bit vector, so it
+ * needs two SB entries.
*/
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+ else
+ sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+ sb_count * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+ /* All supported key sizes must be in little endian format.
+ * Use the 256-bit byte swap passthru option to convert from
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+ */
+ unsigned int pad;
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+ ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+ xts->key_len);
+ }
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1731,42 +1758,53 @@ e_ctx:
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_rsa_engine *rsa = &cmd->u.rsa;
- struct ccp_dm_workarea exp, src;
- struct ccp_data dst;
+ struct ccp_dm_workarea exp, src, dst;
struct ccp_op op;
unsigned int sb_count, i_len, o_len;
int ret;
- if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ /* Check against the maximum allowable size, in bits */
+ if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
return -EINVAL;
if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
return -EINVAL;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
/* The RSA modulus must precede the message being acted upon, so
* it must be copied to a DMA area where the message and the
* modulus can be concatenated. Therefore the input buffer
* length required is twice the output buffer length (which
- * must be a multiple of 256-bits).
+ * must be a multiple of 256 bits). Compute o_len and i_len in bytes.
+ * Buffer sizes must be a multiple of 32 bytes; rounding up may be
+ * required.
*/
- o_len = ((rsa->key_size + 255) / 256) * 32;
+ o_len = 32 * ((rsa->key_size + 255) / 256);
i_len = o_len * 2;
- sb_count = o_len / CCP_SB_BYTES;
-
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
-
- if (!op.sb_key)
- return -EIO;
+ sb_count = 0;
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* sb_count is the number of storage block slots required
+ * for the modulus.
+ */
+ sb_count = o_len / CCP_SB_BYTES;
+ op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+ sb_count);
+ if (!op.sb_key)
+ return -EIO;
+ } else {
+ /* A version 5 device allows a modulus size that will not fit
+ * in the LSB, so the command will transfer it from memory.
+ * Set the sb key to the default, even though it's not used.
+ */
+ op.sb_key = cmd_q->sb_key;
+ }
- /* The RSA exponent may span multiple (32-byte) SB entries and must
- * be in little endian format. Reverse copy each 32-byte chunk
- * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
- * and each byte within that chunk and do not perform any byte swap
- * operations on the passthru operation.
+ /* The RSA exponent must be in little endian format. Reverse its
+ * byte order.
*/
ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
if (ret)
@@ -1775,11 +1813,22 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
if (ret)
goto e_exp;
- ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
- if (ret) {
- cmd->engine_error = cmd_q->cmd_error;
- goto e_exp;
+
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* Copy the exponent to the local storage block, using
+ * as many 32-byte blocks as were allocated above. It's
+ * already little endian, so no further change is required.
+ */
+ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+ } else {
+ /* The exponent can be retrieved from memory via DMA. */
+ op.exp.u.dma.address = exp.dma.address;
+ op.exp.u.dma.offset = 0;
}
/* Concatenate the modulus and the message. Both the modulus and
@@ -1798,8 +1847,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_src;
/* Prepare the output area for the operation */
- ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
- o_len, DMA_FROM_DEVICE);
+ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
if (ret)
goto e_src;
@@ -1807,7 +1855,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.src.u.dma.address = src.dma.address;
op.src.u.dma.offset = 0;
op.src.u.dma.length = i_len;
- op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.address = dst.dma.address;
op.dst.u.dma.offset = 0;
op.dst.u.dma.length = o_len;
@@ -1820,10 +1868,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
+ ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
e_dst:
- ccp_free_data(&dst, cmd_q);
+ ccp_dm_free(&dst);
e_src:
ccp_dm_free(&src);
@@ -1832,7 +1880,8 @@ e_exp:
ccp_dm_free(&exp);
e_sb:
- cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+ if (sb_count)
+ cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
return ret;
}
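
The sizing comment above works out concretely: o_len is the key size rounded up to a 256-bit multiple, expressed in bytes, and i_len doubles it so modulus and message can be concatenated. For a 2048-bit key, as a worked sketch:

/* Worked example of the RSA buffer sizing above (illustrative only). */
static void rsa_len_example(void)
{
	unsigned int key_size = 2048;				/* bits */
	unsigned int o_len = 32 * ((key_size + 255) / 256);	/* 256 bytes */
	unsigned int i_len = o_len * 2;				/* 512 bytes */
}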
@@ -1992,7 +2041,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
deleted file mode 100644
index e880d4cf4ada..000000000000
--- a/drivers/crypto/ccp/ccp-pci.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- * Author: Gary R Hook <gary.hook@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-
-#include "ccp-dev.h"
-
-#define MSIX_VECTORS 2
-
-struct ccp_msix {
- u32 vector;
- char name[16];
-};
-
-struct ccp_pci {
- int msix_count;
- struct ccp_msix msix[MSIX_VECTORS];
-};
-
-static int ccp_get_msix_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct msix_entry msix_entry[MSIX_VECTORS];
- unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
- int v, ret;
-
- for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
- msix_entry[v].entry = v;
-
- ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
- if (ret < 0)
- return ret;
-
- ccp_pci->msix_count = ret;
- for (v = 0; v < ccp_pci->msix_count; v++) {
- /* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
- ccp->name, v);
- ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector,
- ccp->vdata->perform->irqhandler,
- 0, ccp_pci->msix[v].name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
- ret);
- goto e_irq;
- }
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_irq:
- while (v--)
- free_irq(ccp_pci->msix[v].vector, dev);
-
- pci_disable_msix(pdev);
-
- ccp_pci->msix_count = 0;
-
- return ret;
-}
-
-static int ccp_get_msi_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_enable_msi(pdev);
- if (ret)
- return ret;
-
- ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
- goto e_msi;
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_msi:
- pci_disable_msi(pdev);
-
- return ret;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_msix_irqs(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI-X vectors, try MSI */
- dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
- ret = ccp_get_msi_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI interrupt */
- dev_notice(dev, "could not enable MSI (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (ccp_pci->msix_count) {
- while (ccp_pci->msix_count--)
- free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
- dev);
- pci_disable_msix(pdev);
- } else if (ccp->irq) {
- free_irq(ccp->irq, dev);
- pci_disable_msi(pdev);
- }
- ccp->irq = 0;
-}
-
-static int ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- resource_size_t io_len;
- unsigned long io_flags;
-
- io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
- io_len = pci_resource_len(pdev, ccp->vdata->bar);
- if ((io_flags & IORESOURCE_MEM) &&
- (io_len >= (ccp->vdata->offset + 0x800)))
- return ccp->vdata->bar;
-
- return -EIO;
-}
-
-static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ccp_device *ccp;
- struct ccp_pci *ccp_pci;
- struct device *dev = &pdev->dev;
- unsigned int bar;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
- if (!ccp_pci)
- goto e_err;
-
- ccp->dev_specific = ccp_pci;
- ccp->vdata = (struct ccp_vdata *)id->driver_data;
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ret = pci_request_regions(pdev, "ccp");
- if (ret) {
- dev_err(dev, "pci_request_regions failed (%d)\n", ret);
- goto e_err;
- }
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device failed (%d)\n", ret);
- goto e_regions;
- }
-
- pci_set_master(pdev);
-
- ret = ccp_find_mmio_area(ccp);
- if (ret < 0)
- goto e_device;
- bar = ret;
-
- ret = -EIO;
- ccp->io_map = pci_iomap(pdev, bar, 0);
- if (!ccp->io_map) {
- dev_err(dev, "pci_iomap failed\n");
- goto e_device;
- }
- ccp->io_regs = ccp->io_map + ccp->vdata->offset;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
- ret);
- goto e_iomap;
- }
- }
-
- dev_set_drvdata(dev, ccp);
-
- if (ccp->vdata->setup)
- ccp->vdata->setup(ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_iomap;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_iomap:
- pci_iounmap(pdev, ccp->io_map);
-
-e_device:
- pci_disable_device(pdev);
-
-e_regions:
- pci_release_regions(pdev);
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static void ccp_pci_remove(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- if (!ccp)
- return;
-
- ccp->vdata->perform->destroy(ccp);
-
- pci_iounmap(pdev, ccp->io_map);
-
- pci_disable_device(pdev);
-
- pci_release_regions(pdev);
-
- dev_notice(dev, "disabled\n");
-}
-
-#ifdef CONFIG_PM
-static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_pci_resume(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
- { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
- { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
- /* Last entry must be zero */
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ccp_pci_table);
-
-static struct pci_driver ccp_pci_driver = {
- .name = "ccp",
- .id_table = ccp_pci_table,
- .probe = ccp_pci_probe,
- .remove = ccp_pci_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_pci_suspend,
- .resume = ccp_pci_resume,
-#endif
-};
-
-int ccp_pci_init(void)
-{
- return pci_register_driver(&ccp_pci_driver);
-}
-
-void ccp_pci_exit(void)
-{
- pci_unregister_driver(&ccp_pci_driver);
-}
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
deleted file mode 100644
index e26969e601ad..000000000000
--- a/drivers/crypto/ccp/ccp-platform.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/acpi.h>
-
-#include "ccp-dev.h"
-
-struct ccp_platform {
- int coherent;
-};
-
-static const struct acpi_device_id ccp_acpi_match[];
-static const struct of_device_id ccp_of_match[];
-
-static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
- const struct of_device_id *match;
-
- match = of_match_node(ccp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct ccp_vdata *)match->data;
-#endif
- return NULL;
-}
-
-static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_ACPI
- const struct acpi_device_id *match;
-
- match = acpi_match_device(ccp_acpi_match, &pdev->dev);
- if (match && match->driver_data)
- return (struct ccp_vdata *)match->driver_data;
-#endif
- return NULL;
-}
-
-static int ccp_get_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get an interrupt */
- dev_notice(dev, "could not enable interrupts (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
-
- free_irq(ccp->irq, dev);
-}
-
-static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *ior;
-
- ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ior && (resource_size(ior) >= 0x800))
- return ior;
-
- return NULL;
-}
-
-static int ccp_platform_probe(struct platform_device *pdev)
-{
- struct ccp_device *ccp;
- struct ccp_platform *ccp_platform;
- struct device *dev = &pdev->dev;
- enum dev_dma_attr attr;
- struct resource *ior;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
- if (!ccp_platform)
- goto e_err;
-
- ccp->dev_specific = ccp_platform;
- ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
- : ccp_get_acpi_version(pdev);
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ior = ccp_find_mmio_area(ccp);
- ccp->io_map = devm_ioremap_resource(dev, ior);
- if (IS_ERR(ccp->io_map)) {
- ret = PTR_ERR(ccp->io_map);
- goto e_err;
- }
- ccp->io_regs = ccp->io_map;
-
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- goto e_err;
- }
-
- ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
- if (ccp_platform->coherent)
- ccp->axcache = CACHE_WB_NO_ALLOC;
- else
- ccp->axcache = CACHE_NONE;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto e_err;
- }
-
- dev_set_drvdata(dev, ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_err;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static int ccp_platform_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- ccp->vdata->perform->destroy(ccp);
-
- dev_notice(dev, "disabled\n");
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ccp_platform_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_platform_resume(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(of, ccp_of_match);
-#endif
-
-static struct platform_driver ccp_platform_driver = {
- .driver = {
- .name = "ccp",
-#ifdef CONFIG_ACPI
- .acpi_match_table = ccp_acpi_match,
-#endif
-#ifdef CONFIG_OF
- .of_match_table = ccp_of_match,
-#endif
- },
- .probe = ccp_platform_probe,
- .remove = ccp_platform_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_platform_suspend,
- .resume = ccp_platform_resume,
-#endif
-};
-
-int ccp_platform_init(void)
-{
- return platform_driver_register(&ccp_platform_driver);
-}
-
-void ccp_platform_exit(void)
-{
- platform_driver_unregister(&ccp_platform_driver);
-}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
new file mode 100644
index 000000000000..bef387c8abfd
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -0,0 +1,277 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "sp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
+MODULE_DESCRIPTION("AMD Secure Processor driver");
+
+/* List of SPs, read-write access lock, and access functions
+ *
+ * Lock structure: get sp_unit_lock for reading whenever we need to
+ * examine the SP list.
+ */
+static DEFINE_RWLOCK(sp_unit_lock);
+static LIST_HEAD(sp_units);
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t sp_ordinal;
+
+static void sp_add_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_add_tail(&sp->entry, &sp_units);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static void sp_del_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_del(&sp->entry);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static irqreturn_t sp_irq_handler(int irq, void *data)
+{
+ struct sp_device *sp = data;
+
+ if (sp->ccp_irq_handler)
+ sp->ccp_irq_handler(irq, sp->ccp_irq_data);
+
+ if (sp->psp_irq_handler)
+ sp->psp_irq_handler(irq, sp->psp_irq_data);
+
+ return IRQ_HANDLED;
+}
+
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->ccp_irq_data = data;
+ sp->ccp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->ccp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->ccp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->psp_irq_data = data;
+ sp->psp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->psp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->psp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void sp_free_ccp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->psp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->ccp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->ccp_irq_handler = NULL;
+ sp->ccp_irq_data = NULL;
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ free_irq(sp->ccp_irq, data);
+ }
+}
+
+void sp_free_psp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->ccp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->psp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->psp_irq_handler = NULL;
+ sp->psp_irq_data = NULL;
+ } else {
+		/* Each sub-device can manage its own interrupt */
+ free_irq(sp->psp_irq, data);
+ }
+}
+
+/**
+ * sp_alloc_struct - allocate and initialize the sp_device struct
+ *
+ * @dev: device struct of the SP
+ */
+struct sp_device *sp_alloc_struct(struct device *dev)
+{
+ struct sp_device *sp;
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return NULL;
+
+ sp->dev = dev;
+ sp->ord = atomic_inc_return(&sp_ordinal);
+ snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);
+
+ return sp;
+}
+
+int sp_init(struct sp_device *sp)
+{
+ sp_add_device(sp);
+
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_init(sp);
+
+ return 0;
+}
+
+void sp_destroy(struct sp_device *sp)
+{
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_destroy(sp);
+
+ sp_del_device(sp);
+}
+
+#ifdef CONFIG_PM
+int sp_suspend(struct sp_device *sp, pm_message_t state)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_suspend(sp, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_resume(struct sp_device *sp)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_resume(sp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int __init sp_mod_init(void)
+{
+#ifdef CONFIG_X86
+ int ret;
+
+ ret = sp_pci_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+#ifdef CONFIG_ARM64
+ int ret;
+
+ ret = sp_platform_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+ return -ENODEV;
+}
+
+static void __exit sp_mod_exit(void)
+{
+#ifdef CONFIG_X86
+ sp_pci_exit();
+#endif
+
+#ifdef CONFIG_ARM64
+ sp_platform_exit();
+#endif
+}
+
+module_init(sp_mod_init);
+module_exit(sp_mod_exit);
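
The shared-vector demultiplexing above is the heart of this file: when the CCP and PSP sub-devices sit behind one interrupt line, sp_irq_handler() fans the vector out to whichever sub-device handlers are registered. A minimal sketch of how a sub-device driver is expected to consume this interface follows; my_ccp_irq() and my_ccp_init() are illustrative names, not part of the patch:

/* Sketch only: a CCP sub-device hooking its interrupt via the sp layer */
static irqreturn_t my_ccp_irq(int irq, void *data)
{
	/* ack the hardware, schedule the bottom half, etc. */
	return IRQ_HANDLED;
}

static int my_ccp_init(struct sp_device *sp)
{
	/*
	 * If ccp_irq == psp_irq, sp_request_ccp_irq() only records the
	 * handler and lets sp_irq_handler() demultiplex; otherwise it
	 * calls request_irq() directly for this sub-device.
	 */
	return sp_request_ccp_irq(sp, my_ccp_irq, sp->name, sp->ccp_data);
}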
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
new file mode 100644
index 000000000000..5ab486ade1ad
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -0,0 +1,133 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SP_DEV_H__
+#define __SP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#define SP_MAX_NAME_LEN 32
+
+#define CACHE_NONE 0x00
+#define CACHE_WB_NO_ALLOC 0xb7
+
+/* Structure to hold CCP device data */
+struct ccp_device;
+struct ccp_vdata {
+ const unsigned int version;
+ const unsigned int dma_chan_attr;
+ void (*setup)(struct ccp_device *);
+ const struct ccp_actions *perform;
+ const unsigned int offset;
+ const unsigned int rsamax;
+};
+/* Structure to hold SP device data */
+struct sp_dev_vdata {
+ const unsigned int bar;
+
+ const struct ccp_vdata *ccp_vdata;
+ void *psp_vdata;
+};
+
+struct sp_device {
+ struct list_head entry;
+
+ struct device *dev;
+
+ struct sp_dev_vdata *dev_vdata;
+ unsigned int ord;
+ char name[SP_MAX_NAME_LEN];
+
+ /* Bus specific device information */
+ void *dev_specific;
+
+ /* I/O area used for device communication. */
+ void __iomem *io_map;
+
+ /* DMA caching attribute support */
+ unsigned int axcache;
+
+ bool irq_registered;
+ bool use_tasklet;
+
+ unsigned int ccp_irq;
+ irq_handler_t ccp_irq_handler;
+ void *ccp_irq_data;
+
+ unsigned int psp_irq;
+ irq_handler_t psp_irq_handler;
+ void *psp_irq_data;
+
+ void *ccp_data;
+ void *psp_data;
+};
+
+int sp_pci_init(void);
+void sp_pci_exit(void);
+
+int sp_platform_init(void);
+void sp_platform_exit(void);
+
+struct sp_device *sp_alloc_struct(struct device *dev);
+
+int sp_init(struct sp_device *sp);
+void sp_destroy(struct sp_device *sp);
+struct sp_device *sp_get_master(void);
+
+int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_resume(struct sp_device *sp);
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_ccp_irq(struct sp_device *sp, void *data);
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_psp_irq(struct sp_device *sp, void *data);
+
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+
+int ccp_dev_init(struct sp_device *sp);
+void ccp_dev_destroy(struct sp_device *sp);
+
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_resume(struct sp_device *sp);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
+
+static inline int ccp_dev_init(struct sp_device *sp)
+{
+ return 0;
+}
+static inline void ccp_dev_destroy(struct sp_device *sp) { }
+
+static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+{
+ return 0;
+}
+static inline int ccp_dev_resume(struct sp_device *sp)
+{
+ return 0;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+
+#endif
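
Note how the CONFIG_CRYPTO_DEV_SP_CCP stubs let sp-dev.c call ccp_dev_init()/ccp_dev_destroy() unconditionally, with no #ifdefs at the call sites. The psp_vdata member above anticipates a second sub-device that would follow the same pattern; a hypothetical sketch only — PSP support is not part of this patch, and the config symbol and function names here are invented for illustration:

#ifdef CONFIG_CRYPTO_DEV_SP_PSP		/* hypothetical */
int psp_dev_init(struct sp_device *sp);
void psp_dev_destroy(struct sp_device *sp);
#else
static inline int psp_dev_init(struct sp_device *sp) { return 0; }
static inline void psp_dev_destroy(struct sp_device *sp) { }
#endif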
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
new file mode 100644
index 000000000000..9859aa683a28
--- /dev/null
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -0,0 +1,276 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+#define MSIX_VECTORS 2
+
+struct sp_pci {
+ int msix_count;
+ struct msix_entry msix_entry[MSIX_VECTORS];
+};
+
+static int sp_get_msix_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int v, ret;
+
+ for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
+ sp_pci->msix_entry[v].entry = v;
+
+ ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
+ if (ret < 0)
+ return ret;
+
+ sp_pci->msix_count = ret;
+ sp->use_tasklet = true;
+
+ sp->psp_irq = sp_pci->msix_entry[0].vector;
+ sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
+ : sp_pci->msix_entry[0].vector;
+ return 0;
+}
+
+static int sp_get_msi_irq(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ sp->ccp_irq = pdev->irq;
+ sp->psp_irq = pdev->irq;
+
+ return 0;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ int ret;
+
+ ret = sp_get_msix_irqs(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = sp_get_msi_irq(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void sp_free_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (sp_pci->msix_count)
+ pci_disable_msix(pdev);
+ else if (sp->psp_irq)
+ pci_disable_msi(pdev);
+
+ sp->ccp_irq = 0;
+ sp->psp_irq = 0;
+}
+
+static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sp_device *sp;
+ struct sp_pci *sp_pci;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
+ if (!sp_pci)
+ goto e_err;
+
+ sp->dev_specific = sp_pci;
+ sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ sp->io_map = iomap_table[sp->dev_vdata->bar];
+ if (!sp->io_map) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static void sp_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ if (!sp)
+ return;
+
+ sp_destroy(sp);
+
+ sp_free_irqs(sp);
+
+ dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_pci_resume(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5b,
+#endif
+ },
+};
+static const struct pci_device_id sp_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
+ { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+ { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sp_pci_table);
+
+static struct pci_driver sp_pci_driver = {
+ .name = "ccp",
+ .id_table = sp_pci_table,
+ .probe = sp_pci_probe,
+ .remove = sp_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_pci_suspend,
+ .resume = sp_pci_resume,
+#endif
+};
+
+int sp_pci_init(void)
+{
+ return pci_register_driver(&sp_pci_driver);
+}
+
+void sp_pci_exit(void)
+{
+ pci_unregister_driver(&sp_pci_driver);
+}
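
Unlike the removed ccp-pci.c, which unwound through e_iomap/e_device/e_regions labels, this probe leans on managed PCI helpers: pcim_enable_device() and pcim_iomap_regions() register their teardown with devres, so every failure path can jump to the single e_err label and sp_pci_remove() never has to release BARs by hand. A reduced sketch of the pattern, with example_probe as an illustrative name:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);		/* undone on unbind by devres */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(2), "example");
	if (ret)
		return ret;			/* no unwind label required */

	/* further setup; failures may simply return */
	return 0;
}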
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
new file mode 100644
index 000000000000..71734f254fd1
--- /dev/null
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -0,0 +1,256 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
+
+#include "ccp-dev.h"
+
+struct sp_platform {
+ int coherent;
+ unsigned int irq_count;
+};
+
+static const struct acpi_device_id sp_acpi_match[];
+static const struct of_device_id sp_of_match[];
+
+static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(sp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct sp_dev_vdata *)match->data;
+#endif
+ return NULL;
+}
+
+static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct sp_dev_vdata *)match->driver_data;
+#endif
+ return NULL;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct sp_platform *sp_platform = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i, count;
+ int ret;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_IRQ)
+ count++;
+ }
+
+ sp_platform->irq_count = count;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->psp_irq = ret;
+ if (count == 1) {
+ sp->ccp_irq = ret;
+ } else {
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->ccp_irq = ret;
+ }
+
+ return 0;
+}
+
+static int sp_platform_probe(struct platform_device *pdev)
+{
+ struct sp_device *sp;
+ struct sp_platform *sp_platform;
+ struct device *dev = &pdev->dev;
+ enum dev_dma_attr attr;
+ struct resource *ior;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL);
+ if (!sp_platform)
+ goto e_err;
+
+ sp->dev_specific = sp_platform;
+ sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ : sp_get_acpi_version(pdev);
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sp->io_map = devm_ioremap_resource(dev, ior);
+ if (IS_ERR(sp->io_map)) {
+ ret = PTR_ERR(sp->io_map);
+ goto e_err;
+ }
+
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+		dev_err(dev, "DMA is not supported\n");
+		ret = -ENODEV;
+		goto e_err;
+ }
+
+ sp_platform->coherent = (attr == DEV_DMA_COHERENT);
+ if (sp_platform->coherent)
+ sp->axcache = CACHE_WB_NO_ALLOC;
+ else
+ sp->axcache = CACHE_NONE;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static int sp_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ sp_destroy(sp);
+
+ dev_notice(dev, "disabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sp_platform_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_platform_resume(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
+
+static struct platform_driver sp_platform_driver = {
+ .driver = {
+ .name = "ccp",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = sp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = sp_of_match,
+#endif
+ },
+ .probe = sp_platform_probe,
+ .remove = sp_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_platform_suspend,
+ .resume = sp_platform_resume,
+#endif
+};
+
+int sp_platform_init(void)
+{
+ return platform_driver_register(&sp_platform_driver);
+}
+
+void sp_platform_exit(void)
+{
+ platform_driver_unregister(&sp_platform_driver);
+}
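
sp_get_of_version()/sp_get_acpi_version() above select the match data by checking the firmware node type by hand. On kernels that provide device_get_match_data() — a later driver-core addition, so an assumption relative to this series — the two helpers collapse into one call; a sketch of the probe fragment:

	/*
	 * Hypothetical simplification: device_get_match_data() resolves
	 * OF or ACPI match data for whichever bus bound the device.
	 */
	sp->dev_vdata = (struct sp_dev_vdata *)device_get_match_data(&pdev->dev);
	if (!sp->dev_vdata) {
		dev_err(dev, "missing driver data\n");
		return -ENODEV;
	}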
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index fe538e5287a5..eb2a0a73cbed 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -1,10 +1,10 @@
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -30,6 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,6 +40,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -515,6 +517,7 @@ static void geode_aes_remove(struct pci_dev *dev)
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
+
ret = pci_enable_device(dev);
if (ret)
return ret;
@@ -570,7 +573,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
static struct pci_device_id geode_aes_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
{ 0, }
};
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 0c6a917a9ab8..b87000a0a01c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1054,7 +1054,7 @@ res_err:
static int img_hash_remove(struct platform_device *pdev)
{
- static struct img_hash_dev *hdev;
+ struct img_hash_dev *hdev;
hdev = platform_get_drvdata(pdev);
spin_lock(&img_hash.lock);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 1fabd4aee81b..89ba9e85c0f3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -839,9 +839,10 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
ring_irq);
-
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto err_clk;
+ }
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_buf_src;
+ goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_buf_src:
- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
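
The ixp4xx change is purely about unwind ordering: the HMAC buffer is allocated after both the source and destination chains, so its failure path must fall through a label sequence that releases the destination chain first and then the source chain. The general shape of goto-based cleanup, for reference (names are generic):

	a = alloc_a();
	if (!a)
		return -ENOMEM;

	b = alloc_b();
	if (!b)
		goto err_free_a;

	c = alloc_c();
	if (!c)
		goto err_free_b;	/* unwind in reverse allocation order */

	return 0;

err_free_b:
	free_b(b);
err_free_a:
	free_a(a);
	return -ENOMEM;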
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 000b6500a22d..b182e941b0cd 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -500,7 +500,7 @@ static int mtk_crypto_probe(struct platform_device *pdev)
cryp->irq[i] = platform_get_irq(pdev, i);
if (cryp->irq[i] < 0) {
dev_err(cryp->dev, "no IRQ:%d resource info\n", i);
- return -ENXIO;
+ return cryp->irq[i];
}
}
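
This hunk, and the mxc-scc, mxs-dcp, omap-aes and omap-des hunks below, apply the same rule: propagate the negative value from platform_get_irq() instead of substituting a constant such as -ENXIO or -EINVAL, because that value may be -EPROBE_DEFER and the driver core must see it to retry the probe later. The canonical shape, as a minimal sketch:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; do not mask it */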
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ee4be1b0d30b..e01c46387df8 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -708,8 +708,8 @@ static int mxc_scc_probe(struct platform_device *pdev)
for (i = 0; i < 2; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
- dev_err(dev, "failed to get irq resource\n");
- ret = -EINVAL;
+ dev_err(dev, "failed to get irq resource: %d\n", irq);
+ ret = irq;
goto err_out;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50fd78b..764be3e6933c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -908,12 +908,16 @@ static int mxs_dcp_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
- if (dcp_vmi_irq < 0)
+ if (dcp_vmi_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
return dcp_vmi_irq;
+ }
dcp_irq = platform_get_irq(pdev, 1);
- if (dcp_irq < 0)
+ if (dcp_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
return dcp_irq;
+ }
sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
if (!sdcp)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 269451375b63..a9fd8b9e86cd 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1730,8 +1730,8 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
continue;
id = mdesc_get_property(mdesc, tgt, "id", NULL);
if (table[*id] != NULL) {
- dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
+ dev->dev.of_node);
return -EINVAL;
}
cpumask_set_cpu(*id, &p->sharing);
@@ -1751,8 +1751,8 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
if (!p) {
- dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
@@ -1981,41 +1981,39 @@ static void n2_spu_driver_version(void)
static int n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_crypto *np;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found N2CP at %s\n", full_name);
+ pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
np = alloc_n2cp();
if (!np) {
- dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_n2cp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2026,15 +2024,15 @@ static int n2_crypto_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
err = n2_register_algs();
if (err) {
- dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
+ dev->dev.of_node);
goto out_free_spu_list;
}
@@ -2092,42 +2090,40 @@ static void free_ncp(struct n2_mau *mp)
static int n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_mau *mp;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found NCP at %s\n", full_name);
+ pr_info("Found NCP at %pOF\n", dev->dev.of_node);
mp = alloc_ncp();
if (!mp) {
- dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_ncp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2138,8 +2134,8 @@ static int n2_mau_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
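
The n2_core conversion replaces cached full_name strings with the %pOF printk specifier, which has vsnprintf() compose the device-tree node's full path on demand; this is part of the effort to remove the full_name field from struct device_node. Minimal usage, for reference:

	struct device_node *np = dev->of_node;

	/* %pOF renders the node's full path in the log message */
	dev_info(dev, "probing %pOF\n", np);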
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index ad7552a6998c..cd5dda9c48f4 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -38,6 +38,7 @@ config CRYPTO_DEV_NX_COMPRESS_PSERIES
config CRYPTO_DEV_NX_COMPRESS_POWERNV
tristate "Compression acceleration support on PowerNV platform"
depends on PPC_POWERNV
+ depends on PPC_VAS
default y
help
Support for PowerPC Nest (NX) compression acceleration. This
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 1710f80a09ec..874ddf5e9087 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -22,6 +22,8 @@
#include <asm/prom.h>
#include <asm/icswx.h>
+#include <asm/vas.h>
+#include <asm/reg.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
@@ -31,6 +33,9 @@ MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
+#define VAS_RETRIES (10)
+/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+#define MAX_CREDITS_PER_RXFIFO (1024)
struct nx842_workmem {
/* Below fields must be properly aligned */
@@ -41,19 +46,34 @@ struct nx842_workmem {
ktime_t start;
+ struct vas_window *txwin; /* Used with VAS function */
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
struct nx842_coproc {
unsigned int chip_id;
unsigned int ct;
- unsigned int ci;
+ unsigned int ci; /* Coprocessor instance, used with icswx */
+ struct {
+ struct vas_window *rxwin;
+ int id;
+ } vas;
struct list_head list;
};
+/*
+ * Per-CPU pointer to the NX coprocessor on the local chip; requests are
+ * sent to the NX engine on the chip where the process executes (VAS only).
+ */
+static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst);
+
/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx842_coprocs);
-static unsigned int nx842_ct;
+static unsigned int nx842_ct; /* used in icswx function */
+
+static int (*nx842_powernv_exec)(const unsigned char *in,
+ unsigned int inlen, unsigned char *out,
+ unsigned int *outlenp, void *workmem, int fc);
/**
* setup_indirect_dde - Setup an indirect DDE
@@ -238,6 +258,13 @@ static int wait_for_csb(struct nx842_workmem *wmem,
case CSB_CC_TEMPL_OVERFLOW:
CSB_ERR(csb, "Compressed data template shows data past end");
return -EINVAL;
+ case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */
+ /*
+ * DDE byte count exceeds the limit specified in Maximum
+ * byte count register.
+ */
+ CSB_ERR(csb, "DDE byte count exceeds the limit");
+ return -EINVAL;
/* these should not happen */
case CSB_CC_INVALID_ALIGN:
@@ -279,9 +306,17 @@ static int wait_for_csb(struct nx842_workmem *wmem,
CSB_ERR(csb, "Too many DDEs in DDL");
return -EINVAL;
case CSB_CC_TRANSPORT:
+ case CSB_CC_INVALID_CRB: /* P9 or later */
/* shouldn't happen, we setup CRB correctly */
CSB_ERR(csb, "Invalid CRB");
return -EINVAL;
+ case CSB_CC_INVALID_DDE: /* P9 or later */
+ /*
+		 * shouldn't happen, setup_direct/indirect_dde creates
+		 * the DDE correctly
+ */
+ CSB_ERR(csb, "Invalid DDE");
+ return -EINVAL;
case CSB_CC_SEGMENTED_DDL:
/* shouldn't happen, setup_ddl creates DDL right */
CSB_ERR(csb, "Segmented DDL error");
@@ -325,6 +360,9 @@ static int wait_for_csb(struct nx842_workmem *wmem,
case CSB_CC_HW:
CSB_ERR(csb, "Correctable hardware error");
return -EPROTO;
+ case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */
+ CSB_ERR(csb, "Job did not finish within allowed time");
+ return -EPROTO;
default:
CSB_ERR(csb, "Invalid CC %d", csb->cc);
@@ -353,8 +391,42 @@ static int wait_for_csb(struct nx842_workmem *wmem,
return 0;
}
+static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int outlen,
+ struct nx842_workmem *wmem)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ u64 csb_addr;
+ int ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* Clear any previous values */
+ memset(crb, 0, sizeof(*crb));
+
+ /* set up DDLs */
+ ret = setup_ddl(&crb->source, wmem->ddl_in,
+ (unsigned char *)in, inlen, true);
+ if (ret)
+ return ret;
+
+ ret = setup_ddl(&crb->target, wmem->ddl_out,
+ out, outlen, false);
+ if (ret)
+ return ret;
+
+ /* set up CRB's CSB addr */
+ csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+ csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+ crb->csb_addr = cpu_to_be64(csb_addr);
+
+ return 0;
+}
+
/**
- * nx842_powernv_function - compress/decompress data using the 842 algorithm
+ * nx842_exec_icswx - compress/decompress data using the 842 algorithm
*
* (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
* This compresses or decompresses the provided input buffer into the provided
@@ -384,7 +456,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
* -ETIMEDOUT hardware did not complete operation in reasonable time
* -EINTR operation was aborted
*/
-static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
+static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *workmem, int fc)
{
@@ -392,7 +464,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
struct coprocessor_status_block *csb;
struct nx842_workmem *wmem;
int ret;
- u64 csb_addr;
u32 ccw;
unsigned int outlen = *outlenp;
@@ -406,32 +477,18 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
return -ENODEV;
}
- crb = &wmem->crb;
- csb = &crb->csb;
-
- /* Clear any previous values */
- memset(crb, 0, sizeof(*crb));
-
- /* set up DDLs */
- ret = setup_ddl(&crb->source, wmem->ddl_in,
- (unsigned char *)in, inlen, true);
- if (ret)
- return ret;
- ret = setup_ddl(&crb->target, wmem->ddl_out,
- out, outlen, false);
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
if (ret)
return ret;
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
/* set up CCW */
ccw = 0;
- ccw = SET_FIELD(ccw, CCW_CT, nx842_ct);
- ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */
- ccw = SET_FIELD(ccw, CCW_FC_842, fc);
-
- /* set up CRB's CSB addr */
- csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
- csb_addr |= CRB_CSB_AT; /* Addrs are phys */
- crb->csb_addr = cpu_to_be64(csb_addr);
+ ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
+ ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
wmem->start = ktime_get();
@@ -471,6 +528,104 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
}
/**
+ * nx842_exec_vas - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * output data. If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer
+ * at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC	Output buffer is too small
+ * -EMSGSIZE	Input buffer too large
+ * -EINVAL	buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ struct vas_window *txwin;
+ int ret, i = 0;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ ccw = 0;
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+ crb->ccw = cpu_to_be32(ccw);
+
+ txwin = wmem->txwin;
+	/* shouldn't happen, we don't load without a coproc */
+ if (!txwin) {
+		pr_err_ratelimited("NX-842 coprocessor is not available\n");
+ return -ENODEV;
+ }
+
+ do {
+ wmem->start = ktime_get();
+ preempt_disable();
+ /*
+		 * VAS copies the CRB into the L2 cache. See <asm/vas.h>
+		 * for the @crb and @offset arguments.
+ */
+ vas_copy_crb(crb, 0);
+
+ /*
+		 * VAS pastes the previously copied CRB to NX; takes
+		 * @txwin, @offset and @last (which must be true).
+ */
+ ret = vas_paste_crb(txwin, 0, 1);
+ preempt_enable();
+ /*
+ * Retry copy/paste function for VAS failures.
+ */
+ } while (ret && (i++ < VAS_RETRIES));
+
+ if (ret) {
+ pr_err_ratelimited("VAS copy/paste failed\n");
+ return ret;
+ }
+
+ ret = wait_for_csb(wmem, csb);
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
+
+/**
* nx842_powernv_compress - Compress data using the 842 algorithm
*
* Compression provided by the NX842 coprocessor on IBM PowerNV systems.
@@ -488,13 +643,13 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
* @workmem: working memory buffer pointer, size determined by
* nx842_powernv_driver.workmem_size
*
- * Returns: see @nx842_powernv_function()
+ * Returns: see @nx842_powernv_exec()
*/
static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *wmem)
{
- return nx842_powernv_function(in, inlen, out, outlenp,
+ return nx842_powernv_exec(in, inlen, out, outlenp,
wmem, CCW_FC_842_COMP_CRC);
}
@@ -516,16 +671,219 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
* @workmem: working memory buffer pointer, size determined by
* nx842_powernv_driver.workmem_size
*
- * Returns: see @nx842_powernv_function()
+ * Returns: see @nx842_powernv_exec()
*/
static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *wmem)
{
- return nx842_powernv_function(in, inlen, out, outlenp,
+ return nx842_powernv_exec(in, inlen, out, outlenp,
wmem, CCW_FC_842_DECOMP_CRC);
}
+static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
+ int chipid)
+{
+ coproc->chip_id = chipid;
+ INIT_LIST_HEAD(&coproc->list);
+ list_add(&coproc->list, &nx842_coprocs);
+}
+
+/*
+ * Identify the chip ID for each CPU and save the coprocessor address of
+ * the corresponding NX engine in the per-CPU coproc_inst.
+ * coproc_inst is used in crypto_init to open a send window on the NX
+ * instance for the CPU / chip where the open request is executed.
+ */
+static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc)
+{
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ chip_id = cpu_to_chip_id(i);
+
+ if (coproc->chip_id == chip_id)
+ per_cpu(coproc_inst, i) = coproc;
+ }
+}
+
+
+static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
+{
+ struct vas_window *txwin = NULL;
+ struct vas_tx_win_attr txattr;
+
+ /*
+ * Kernel requests will be high priority. So open send
+ * windows only for high priority RxFIFO entries.
+ */
+ vas_init_tx_win_attr(&txattr, coproc->ct);
+ txattr.lpid = 0; /* lpid is 0 for kernel requests */
+ txattr.pid = 0; /* pid is 0 for kernel requests */
+
+ /*
+	 * Open a VAS send window which is used to send requests to NX.
+ */
+ txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
+ if (IS_ERR(txwin)) {
+ pr_err("ibm,nx-842: Can not open TX window: %ld\n",
+ PTR_ERR(txwin));
+ return NULL;
+ }
+
+ return txwin;
+}
+
+static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ int vasid)
+{
+ struct vas_window *rxwin = NULL;
+ struct vas_rx_win_attr rxattr;
+ struct nx842_coproc *coproc;
+ u32 lpid, pid, tid, fifo_size;
+ u64 rx_fifo;
+ const char *priority;
+ int ret;
+
+ ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
+ if (ret) {
+ pr_err("Missing rx-fifo-address property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
+ if (ret) {
+ pr_err("Missing rx-fifo-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "lpid", &lpid);
+ if (ret) {
+ pr_err("Missing lpid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "pid", &pid);
+ if (ret) {
+ pr_err("Missing pid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "tid", &tid);
+ if (ret) {
+ pr_err("Missing tid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_string(dn, "priority", &priority);
+ if (ret) {
+ pr_err("Missing priority property\n");
+ return ret;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ if (!strcmp(priority, "High"))
+ coproc->ct = VAS_COP_TYPE_842_HIPRI;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = VAS_COP_TYPE_842;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ vas_init_rx_win_attr(&rxattr, coproc->ct);
+ rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo_size = fifo_size;
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+ rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+
+ /*
+	 * Open a VAS receive window which is used to configure RxFIFO
+ * for NX.
+ */
+ rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
+ if (IS_ERR(rxwin)) {
+ ret = PTR_ERR(rxwin);
+ pr_err("setting RxFIFO with VAS failed: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ coproc->vas.rxwin = rxwin;
+ coproc->vas.id = vasid;
+ nx842_add_coprocs_list(coproc, chip_id);
+
+ /*
+ * Kernel requests use only high priority FIFOs. So save coproc
+ * info in percpu coproc_inst which will be used to open send
+ * windows for crypto open requests later.
+ */
+ if (coproc->ct == VAS_COP_TYPE_842_HIPRI)
+ nx842_set_per_cpu_coproc(coproc);
+
+ return 0;
+
+err_out:
+ kfree(coproc);
+ return ret;
+}
+
+
+static int __init nx842_powernv_probe_vas(struct device_node *pn)
+{
+ struct device_node *dn;
+ int chip_id, vasid, ret = 0;
+ int nx_fifo_found = 0;
+
+ chip_id = of_get_ibm_chip_id(pn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
+ for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") {
+ if (of_get_ibm_chip_id(dn) == chip_id)
+ break;
+ }
+
+ if (!dn) {
+ pr_err("Missing VAS device node\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) {
+ pr_err("Missing ibm,vas-id device property\n");
+ of_node_put(dn);
+ return -EINVAL;
+ }
+
+ of_node_put(dn);
+
+ for_each_child_of_node(pn, dn) {
+ if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid);
+ if (ret) {
+ of_node_put(dn);
+ return ret;
+ }
+ nx_fifo_found++;
+ }
+ }
+
+ if (!nx_fifo_found) {
+ pr_err("NX842 FIFO nodes are missing\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int __init nx842_powernv_probe(struct device_node *dn)
{
struct nx842_coproc *coproc;
@@ -552,11 +910,9 @@ static int __init nx842_powernv_probe(struct device_node *dn)
if (!coproc)
return -ENOMEM;
- coproc->chip_id = chip_id;
coproc->ct = ct;
coproc->ci = ci;
- INIT_LIST_HEAD(&coproc->list);
- list_add(&coproc->list, &nx842_coprocs);
+ nx842_add_coprocs_list(coproc, chip_id);
pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
@@ -569,6 +925,19 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return 0;
}
+static void nx842_delete_coprocs(void)
+{
+ struct nx842_coproc *coproc, *n;
+
+ list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ if (coproc->vas.rxwin)
+ vas_win_close(coproc->vas.rxwin);
+
+ list_del(&coproc->list);
+ kfree(coproc);
+ }
+}
+
static struct nx842_constraints nx842_powernv_constraints = {
.alignment = DDE_BUFFER_ALIGN,
.multiple = DDE_BUFFER_LAST_MULT,
@@ -585,6 +954,46 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
+static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_workmem *wmem;
+ struct nx842_coproc *coproc;
+ int ret;
+
+ ret = nx842_crypto_init(tfm, &nx842_powernv_driver);
+
+ if (ret)
+ return ret;
+
+ wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
+ coproc = per_cpu(coproc_inst, smp_processor_id());
+
+ ret = -EINVAL;
+ if (coproc && coproc->vas.rxwin) {
+ wmem->txwin = nx842_alloc_txwin(coproc);
+ if (!IS_ERR(wmem->txwin))
+ return 0;
+
+ ret = PTR_ERR(wmem->txwin);
+ }
+
+ return ret;
+}
+
+static void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_workmem *wmem;
+
+ wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
+
+ if (wmem && wmem->txwin)
+ vas_win_close(wmem->txwin);
+
+ nx842_crypto_exit(tfm);
+}
+
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
return nx842_crypto_init(tfm, &nx842_powernv_driver);
@@ -618,21 +1027,31 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
- for_each_compatible_node(dn, NULL, "ibm,power-nx")
- nx842_powernv_probe(dn);
+ for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
+ ret = nx842_powernv_probe_vas(dn);
+ if (ret) {
+ nx842_delete_coprocs();
+ return ret;
+ }
+ }
- if (!nx842_ct)
- return -ENODEV;
+ if (list_empty(&nx842_coprocs)) {
+ for_each_compatible_node(dn, NULL, "ibm,power-nx")
+ nx842_powernv_probe(dn);
- ret = crypto_register_alg(&nx842_powernv_alg);
- if (ret) {
- struct nx842_coproc *coproc, *n;
+ if (!nx842_ct)
+ return -ENODEV;
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- list_del(&coproc->list);
- kfree(coproc);
- }
+ nx842_powernv_exec = nx842_exec_icswx;
+ } else {
+ nx842_powernv_exec = nx842_exec_vas;
+ nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas;
+ nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas;
+ }
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
+ nx842_delete_coprocs();
return ret;
}
@@ -642,13 +1061,8 @@ module_init(nx842_powernv_init);
static void __exit nx842_powernv_exit(void)
{
- struct nx842_coproc *coproc, *n;
-
crypto_unregister_alg(&nx842_powernv_alg);
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- list_del(&coproc->list);
- kfree(coproc);
- }
+ nx842_delete_coprocs();
}
module_exit(nx842_powernv_exit);
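
With the nx842_powernv_exec dispatch pointer in place, nothing changes for consumers: both the icswx and VAS execution paths are reached through the same registered compression algorithm. A hedged usage sketch through the generic crypto API — it assumes the "842" cra_name that the nx842 crypto glue registers, and trims error handling:

	struct crypto_comp *tfm;
	unsigned int dlen = dst_len;
	int err;

	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* routed to nx842_exec_vas() or nx842_exec_icswx() internally */
	err = crypto_comp_compress(tfm, src, slen, dst, &dlen);

	crypto_free_comp(tfm);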
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..da3cb8c35ec7 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
spin_lock_init(&ctx->lock);
ctx->driver = driver;
- ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL);
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index a4eee3bba937..bb2f31792683 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -76,9 +76,17 @@
#define CSB_CC_DECRYPT_OVERFLOW (64)
/* asym crypt codes */
#define CSB_CC_MINV_OVERFLOW (128)
+/*
+ * HW error - Job did not finish in the maximum time allowed.
+ * Job terminated.
+ */
+#define CSB_CC_HW_EXPIRED_TIMER (224)
/* These are reserved for hypervisor use */
#define CSB_CC_HYP_RESERVE_START (240)
#define CSB_CC_HYP_RESERVE_END (253)
+#define CSB_CC_HYP_RESERVE_P9_END (251)
+/* No valid interrupt server (P9 or later). */
+#define CSB_CC_HYP_RESERVE_NO_INTR_SERVER (252)
#define CSB_CC_HYP_NO_HW (254)
#define CSB_CC_HYP_HANG_ABORTED (255)
@@ -100,11 +108,6 @@ static inline unsigned long nx842_get_pa(void *addr)
return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);
}
-/* Get/Set bit fields */
-#define MASK_LSH(m) (__builtin_ffsl(m) - 1)
-#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m))
-#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
-
/**
* This provides the driver's constraints. Different nx842 implementations
* may have varying requirements. The constraints are:
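
For reference, the two field helpers removed above are self-contained enough to demo in userspace (GCC/Clang, because of __builtin_ffsl); the macro bodies below are copied verbatim from the deleted lines, and the test values are illustrative only:

#include <stdio.h>

/* Macro bodies copied verbatim from the lines deleted above. */
#define MASK_LSH(m)		(__builtin_ffsl(m) - 1)
#define GET_FIELD(v, m)		(((v) & (m)) >> MASK_LSH(m))
#define SET_FIELD(v, m, val)	(((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))

int main(void)
{
	unsigned long v = 0x1234, m = 0x0f00;	/* illustrative values */

	printf("GET_FIELD -> %#lx\n", GET_FIELD(v, m));		/* 0x2 */
	printf("SET_FIELD -> %#lx\n", SET_FIELD(v, m, 0x7));	/* 0x1734 */
	return 0;
}

MASK_LSH() locates the mask's lowest set bit, so both helpers work for any contiguous mask; for comparison, the kernel's generic FIELD_GET()/FIELD_PREP() helpers in <linux/bitfield.h> cover the same pattern.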
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5120a17731d0..c376a3ee7c2c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1095,6 +1095,7 @@ static int omap_aes_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "can't get IRQ resource\n");
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 0bcab00e0ff5..d37c9506c36c 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1023,7 +1023,8 @@ static int omap_des_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "can't get IRQ resource\n");
+ dev_err(dev, "can't get IRQ resource: %d\n", irq);
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9ad9d399daf1..c40ac30ec002 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2133,7 +2133,7 @@ data_err:
static int omap_sham_remove(struct platform_device *pdev)
{
- static struct omap_sham_dev *dd;
+ struct omap_sham_dev *dd;
int i, j;
dd = platform_get_drvdata(pdev);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index d3e25c37dc33..da8a2d3b5e9a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -208,7 +208,7 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
static void adf_resume(struct pci_dev *pdev)
{
dev_info(&pdev->dev, "Acceleration driver reset completed\n");
- dev_info(&pdev->dev, "Device is up and runnig\n");
+ dev_info(&pdev->dev, "Device is up and running\n");
}
static const struct pci_error_handlers adf_err_handler = {
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index d0f80c6241f9..c9d622abd90c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -169,50 +169,82 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
u32 interrupt_status;
- int err = 0;
spin_lock(&dev->lock);
interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
if (interrupt_status & 0x0a) {
dev_warn(dev->dev, "DMA Error\n");
- err = -EFAULT;
- } else if (interrupt_status & 0x05) {
- err = dev->update(dev);
+ dev->err = -EFAULT;
}
- if (err)
- dev->complete(dev, err);
+ tasklet_schedule(&dev->done_task);
+
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
-static void rk_crypto_tasklet_cb(unsigned long data)
+static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, async_req);
+ if (dev->busy) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+ }
+ dev->busy = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ tasklet_schedule(&dev->queue_task);
+
+ return ret;
+}
+
+static void rk_crypto_queue_task_cb(unsigned long data)
{
struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int err = 0;
+ dev->err = 0;
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- spin_unlock_irqrestore(&dev->lock, flags);
+
if (!async_req) {
- dev_err(dev->dev, "async_req is NULL !!\n");
+ dev->busy = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
return;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
if (backlog) {
backlog->complete(backlog, -EINPROGRESS);
backlog = NULL;
}
- if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
- dev->ablk_req = ablkcipher_request_cast(async_req);
- else
- dev->ahash_req = ahash_request_cast(async_req);
+ dev->async_req = async_req;
err = dev->start(dev);
if (err)
- dev->complete(dev, err);
+ dev->complete(dev->async_req, err);
+}
+
+static void rk_crypto_done_task_cb(unsigned long data)
+{
+ struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+
+ if (dev->err) {
+ dev->complete(dev->async_req, dev->err);
+ return;
+ }
+
+ dev->err = dev->update(dev);
+ if (dev->err)
+ dev->complete(dev->async_req, dev->err);
}
static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -361,14 +393,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
crypto_info->dev = &pdev->dev;
platform_set_drvdata(pdev, crypto_info);
- tasklet_init(&crypto_info->crypto_tasklet,
- rk_crypto_tasklet_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->queue_task,
+ rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->done_task,
+ rk_crypto_done_task_cb, (unsigned long)crypto_info);
crypto_init_queue(&crypto_info->queue, 50);
crypto_info->enable_clk = rk_crypto_enable_clk;
crypto_info->disable_clk = rk_crypto_disable_clk;
crypto_info->load_data = rk_load_data;
crypto_info->unload_data = rk_unload_data;
+ crypto_info->enqueue = rk_crypto_enqueue;
+ crypto_info->busy = false;
err = rk_crypto_register(crypto_info);
if (err) {
@@ -380,7 +416,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
return 0;
err_register_alg:
- tasklet_kill(&crypto_info->crypto_tasklet);
+ tasklet_kill(&crypto_info->queue_task);
+ tasklet_kill(&crypto_info->done_task);
err_crypto:
return err;
}
@@ -390,7 +427,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
rk_crypto_unregister();
- tasklet_kill(&crypto_tmp->crypto_tasklet);
+ tasklet_kill(&crypto_tmp->done_task);
+ tasklet_kill(&crypto_tmp->queue_task);
return 0;
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index d7b71fea320b..ab6a1b4c40f0 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -190,9 +190,10 @@ struct rk_crypto_info {
void __iomem *reg;
int irq;
struct crypto_queue queue;
- struct tasklet_struct crypto_tasklet;
- struct ablkcipher_request *ablk_req;
- struct ahash_request *ahash_req;
+ struct tasklet_struct queue_task;
+ struct tasklet_struct done_task;
+ struct crypto_async_request *async_req;
+ int err;
/* device lock */
spinlock_t lock;
@@ -208,18 +209,20 @@ struct rk_crypto_info {
size_t nents;
unsigned int total;
unsigned int count;
- u32 mode;
dma_addr_t addr_in;
dma_addr_t addr_out;
+ bool busy;
int (*start)(struct rk_crypto_info *dev);
int (*update)(struct rk_crypto_info *dev);
- void (*complete)(struct rk_crypto_info *dev, int err);
+ void (*complete)(struct crypto_async_request *base, int err);
int (*enable_clk)(struct rk_crypto_info *dev);
void (*disable_clk)(struct rk_crypto_info *dev);
int (*load_data)(struct rk_crypto_info *dev,
struct scatterlist *sg_src,
struct scatterlist *sg_dst);
void (*unload_data)(struct rk_crypto_info *dev);
+ int (*enqueue)(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req);
};
/* the private variable of hash */
@@ -232,12 +235,14 @@ struct rk_ahash_ctx {
/* the privete variable of hash for fallback */
struct rk_ahash_rctx {
struct ahash_request fallback_req;
+ u32 mode;
};
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
+ u32 mode;
};
enum alg_type {
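
To make the rework above easier to follow, here is a minimal userspace toy of the new hand-off: enqueue() only kicks the queue "tasklet" when the engine is idle, the IRQ path records dev->err and schedules the done "tasklet", and the done tasklet drives update()/complete(). All names are stand-ins that mirror the driver (locking, real tasklets and DMA are elided); this is a sketch of the flow, not the driver itself:

#include <stdbool.h>
#include <stdio.h>

struct toy_req { int id; int chunks_left; };

static struct toy_req *fifo[8];
static unsigned int head, tail;
static bool busy;
static struct toy_req *async_req;	/* mirrors dev->async_req */
static int dev_err;			/* mirrors dev->err */

static void queue_task_cb(void);

static void complete(struct toy_req *req, int err)
{
	printf("req %d: complete, err=%d\n", req->id, err);
}

static void done_task_cb(void)		/* cf. rk_crypto_done_task_cb */
{
	if (dev_err) {			/* DMA error noted by the "IRQ" */
		complete(async_req, dev_err);
		return;
	}
	if (--async_req->chunks_left > 0) {
		printf("req %d: next chunk\n", async_req->id);
		done_task_cb();		/* stands in for the next IRQ */
	} else {
		complete(async_req, 0);
		queue_task_cb();	/* cf. rk_ablk_rx(): pull next req */
	}
}

static void queue_task_cb(void)		/* cf. rk_crypto_queue_task_cb */
{
	if (head == tail) {
		busy = false;		/* queue drained, engine idle */
		return;
	}
	async_req = fifo[head++ % 8];
	dev_err = 0;
	printf("req %d: start\n", async_req->id);
	done_task_cb();			/* "hardware" raises its IRQ */
}

static int enqueue(struct toy_req *req)	/* cf. rk_crypto_enqueue */
{
	fifo[tail++ % 8] = req;
	if (busy)
		return 0;		/* tasklet already draining */
	busy = true;
	queue_task_cb();
	return 0;
}

int main(void)
{
	struct toy_req a = { 1, 2 }, b = { 2, 1 };

	enqueue(&a);
	enqueue(&b);
	return 0;
}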
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index b5a3afe222e4..639c15c5364b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -15,35 +15,19 @@
#define RK_CRYPTO_DEC BIT(0)
-static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ablk_req->base.complete)
- dev->ablk_req->base.complete(&dev->ablk_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static int rk_handle_req(struct rk_crypto_info *dev,
struct ablkcipher_request *req)
{
- unsigned long flags;
- int err;
-
if (!IS_ALIGNED(req->nbytes, dev->align_size))
return -EINVAL;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->aligned = 1;
- dev->ablk_req = req;
-
- spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->crypto_tasklet);
- return err;
+ return dev->enqueue(dev, &req->base);
}
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
@@ -93,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
return rk_handle_req(dev, req);
}
@@ -103,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -113,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
return rk_handle_req(dev, req);
}
@@ -123,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -133,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = 0;
+ ctx->mode = 0;
return rk_handle_req(dev, req);
}
@@ -143,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -153,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -163,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -173,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT;
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
return rk_handle_req(dev, req);
}
@@ -183,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -193,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -203,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *cipher =
- crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 ivsize, block, conf_reg = 0;
@@ -220,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
ivsize = crypto_ablkcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
- dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
- dev->ablk_req->info, ivsize);
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
conf_reg = RK_CRYPTO_DESSEL;
} else {
- dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
- dev->mode |= RK_CRYPTO_AES_192BIT_key;
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
- dev->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
- dev->ablk_req->info, ivsize);
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -268,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
static int rk_ablk_start(struct rk_crypto_info *dev)
{
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
unsigned long flags;
- int err;
+ int err = 0;
+
+ dev->left_bytes = req->nbytes;
+ dev->total = req->nbytes;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->aligned = 1;
spin_lock_irqsave(&dev->lock, flags);
rk_ablk_hw_init(dev);
@@ -280,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+ ivsize);
else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}
/* return:
@@ -298,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
dev->unload_data(dev);
if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
+ if (!sg_pcopy_from_buffer(req->dst, dev->nents,
dev->addr_vir, dev->count,
dev->total - dev->left_bytes -
dev->count)) {
@@ -324,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
} else {
rk_iv_copyback(dev);
/* here show the calculation is over without any err */
- dev->complete(dev, 0);
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
return err;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 718588219f75..821a506b9e17 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ahash_req->base.complete)
- dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
int reg_status = 0;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
@@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
RK_CRYPTO_HRDMA_DONE_INT);
- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
RK_CRYPTO_HASH_SWAP_DO);
CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
@@ -164,64 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct rk_crypto_info *dev = NULL;
- unsigned long flags;
- int ret;
+ struct rk_crypto_info *dev = tctx->dev;
if (!req->nbytes)
return zero_message_process(req);
-
- dev = tctx->dev;
- dev->total = req->nbytes;
- dev->left_bytes = req->nbytes;
- dev->aligned = 0;
- dev->mode = 0;
- dev->align_size = 4;
- dev->sg_dst = NULL;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
-
- switch (crypto_ahash_digestsize(tfm)) {
- case SHA1_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA1;
- break;
- case SHA256_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA256;
- break;
- case MD5_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_MD5;
- break;
- default:
- return -EINVAL;
- }
-
- rk_ahash_reg_init(dev);
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- tasklet_schedule(&dev->crypto_tasklet);
-
- /*
- * it will take some time to process date after last dma transmission.
- *
- * waiting time is relative with the last date len,
- * so cannot set a fixed time here.
- * 10-50 makes system not call here frequently wasting
- * efficiency, and make it response quickly when dma
- * complete.
- */
- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
- usleep_range(10, 50);
-
- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
- crypto_ahash_digestsize(tfm));
-
- return 0;
+ return dev->enqueue(dev, &req->base);
}
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
@@ -244,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
static int rk_ahash_start(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
+ struct rk_ahash_rctx *rctx;
+
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ rctx = ahash_request_ctx(req);
+ rctx->mode = 0;
+
+ tfm = crypto_ahash_reqtfm(req);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
return rk_ahash_set_data_start(dev);
}
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
dev->unload_data(dev);
if (dev->left_bytes) {
@@ -264,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
}
err = rk_ahash_set_data_start(dev);
} else {
- dev->complete(dev, 0);
+ /*
+ * It will take some time to process the data after the
+ * last DMA transfer.
+ *
+ * The wait depends on the length of that last chunk, so
+ * no fixed delay fits every case. Polling in 10us steps
+ * keeps this loop cheap while still reacting quickly
+ * once the DMA completes.
+ */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ udelay(10);
+
+ tfm = crypto_ahash_reqtfm(req);
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1d9ecd368b5b..08e7bdcaa6e3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -202,7 +202,6 @@ struct sahara_dev {
struct completion dma_completion;
struct sahara_ctx *ctx;
- spinlock_t lock;
struct crypto_queue queue;
unsigned long flags;
@@ -543,10 +542,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
unmap_out:
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return -EINVAL;
}
@@ -594,9 +593,9 @@ static int sahara_aes_process(struct ablkcipher_request *req)
}
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
return 0;
}
@@ -1376,13 +1375,13 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
crypto_unregister_ahash(&sha_v4_algs[i]);
}
-static struct platform_device_id sahara_platform_ids[] = {
+static const struct platform_device_id sahara_platform_ids[] = {
{ .name = "sahara-imx27" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
-static struct of_device_id sahara_dt_ids[] = {
+static const struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
@@ -1487,7 +1486,6 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- spin_lock_init(&dev->lock);
mutex_init(&dev->queue_mutex);
dev_ptr = dev;
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 09b4ec87c212..602332e02729 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,7 +1,20 @@
-config CRYPTO_DEV_STM32
- tristate "Support for STM32 crypto accelerators"
+config CRC_DEV_STM32
+ tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
help
This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronis STM32 SOC.
+ on STMicroelectronics STM32 SOC.
+
+config HASH_DEV_STM32
+ tristate "Support for STM32 hash accelerators"
+ depends on ARCH_STM32
+ depends on HAS_DMA
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_ENGINE
+ help
+ This enables support for the HASH hw accelerator which can be found
+ on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73b4c6e47f5f..73cd56cad0cc 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o
-stm32_cryp-objs := stm32_crc32.o
+obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
+obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
new file mode 100644
index 000000000000..b585ce54a802
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -0,0 +1,1575 @@
+/*
+ * This file is part of STM32 Crypto driver for Linux.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define HASH_CR 0x00
+#define HASH_DIN 0x04
+#define HASH_STR 0x08
+#define HASH_IMR 0x20
+#define HASH_SR 0x24
+#define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
+#define HASH_HREG(x) (0x310 + ((x) * 0x04))
+#define HASH_HWCFGR 0x3F0
+#define HASH_VER 0x3F4
+#define HASH_ID 0x3F8
+
+/* Control Register */
+#define HASH_CR_INIT BIT(2)
+#define HASH_CR_DMAE BIT(3)
+#define HASH_CR_DATATYPE_POS 4
+#define HASH_CR_MODE BIT(6)
+#define HASH_CR_MDMAT BIT(13)
+#define HASH_CR_DMAA BIT(14)
+#define HASH_CR_LKEY BIT(16)
+
+#define HASH_CR_ALGO_SHA1 0x0
+#define HASH_CR_ALGO_MD5 0x80
+#define HASH_CR_ALGO_SHA224 0x40000
+#define HASH_CR_ALGO_SHA256 0x40080
+
+/* Interrupt */
+#define HASH_DINIE BIT(0)
+#define HASH_DCIE BIT(1)
+
+/* Interrupt Mask */
+#define HASH_MASK_CALC_COMPLETION BIT(0)
+#define HASH_MASK_DATA_INPUT BIT(1)
+
+/* Context swap register */
+#define HASH_CSR_REGISTER_NUMBER 53
+
+/* Status Flags */
+#define HASH_SR_DATA_INPUT_READY BIT(0)
+#define HASH_SR_OUTPUT_READY BIT(1)
+#define HASH_SR_DMA_ACTIVE BIT(2)
+#define HASH_SR_BUSY BIT(3)
+
+/* STR Register */
+#define HASH_STR_NBLW_MASK GENMASK(4, 0)
+#define HASH_STR_DCAL BIT(8)
+
+#define HASH_FLAGS_INIT BIT(0)
+#define HASH_FLAGS_OUTPUT_READY BIT(1)
+#define HASH_FLAGS_CPU BIT(2)
+#define HASH_FLAGS_DMA_READY BIT(3)
+#define HASH_FLAGS_DMA_ACTIVE BIT(4)
+#define HASH_FLAGS_HMAC_INIT BIT(5)
+#define HASH_FLAGS_HMAC_FINAL BIT(6)
+#define HASH_FLAGS_HMAC_KEY BIT(7)
+
+#define HASH_FLAGS_FINAL BIT(15)
+#define HASH_FLAGS_FINUP BIT(16)
+#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
+#define HASH_FLAGS_MD5 BIT(18)
+#define HASH_FLAGS_SHA1 BIT(19)
+#define HASH_FLAGS_SHA224 BIT(20)
+#define HASH_FLAGS_SHA256 BIT(21)
+#define HASH_FLAGS_ERRORS BIT(22)
+#define HASH_FLAGS_HMAC BIT(23)
+
+#define HASH_OP_UPDATE 1
+#define HASH_OP_FINAL 2
+
+enum stm32_hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+#define HASH_BUFLEN 256
+#define HASH_LONG_KEY 64
+#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
+#define HASH_QUEUE_LENGTH 16
+#define HASH_DMA_THRESHOLD 50
+
+struct stm32_hash_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+
+ u8 key[HASH_MAX_KEY_SIZE];
+ int keylen;
+};
+
+struct stm32_hash_request_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+ unsigned long op;
+
+ u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ size_t digcnt;
+ size_t bufcnt;
+ size_t buflen;
+
+ /* DMA */
+ struct scatterlist *sg;
+ unsigned int offset;
+ unsigned int total;
+ struct scatterlist sg_key;
+
+ dma_addr_t dma_addr;
+ size_t dma_ct;
+ int nents;
+
+ u8 data_type;
+
+ u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
+
+ /* Export Context */
+ u32 *hw_context;
+};
+
+struct stm32_hash_algs_info {
+ struct ahash_alg *algs_list;
+ size_t size;
+};
+
+struct stm32_hash_pdata {
+ struct stm32_hash_algs_info *algs_info;
+ size_t algs_info_size;
+};
+
+struct stm32_hash_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *io_base;
+ phys_addr_t phys_base;
+ u32 dma_mode;
+ u32 dma_maxburst;
+
+ spinlock_t lock; /* lock to protect queue */
+
+ struct ahash_request *req;
+ struct crypto_engine *engine;
+
+ int err;
+ unsigned long flags;
+
+ struct dma_chan *dma_lch;
+ struct completion dma_completion;
+
+ const struct stm32_hash_pdata *pdata;
+};
+
+struct stm32_hash_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* protects dev_list */
+};
+
+static struct stm32_hash_drv stm32_hash = {
+ .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
+};
+
+static void stm32_hash_dma_callback(void *param);
+
+static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
+{
+ return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
+ !(status & HASH_SR_BUSY), 10, 10000);
+}
+
+static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
+{
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg &= ~(HASH_STR_NBLW_MASK);
+ reg |= (8U * ((length) % 4U));
+ stm32_hash_write(hdev, HASH_STR, reg);
+}
+
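
As a side note on the value programmed by stm32_hash_set_nblw() above: NBLW is the number of valid bits in the final 32-bit word, i.e. 8 bits per trailing byte, with the % 4 wrap mapping a full word to 0. A standalone sketch, with sample lengths that are illustrative only:

#include <stdio.h>

/* NBLW as computed by stm32_hash_set_nblw() above: 8 valid bits per
 * trailing byte of the final 32-bit word; a full word wraps to 0. */
int main(void)
{
	for (unsigned int length = 0; length <= 6; length++)
		printf("length %u -> NBLW %2u\n", length, 8U * (length % 4U));
	return 0;
}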
+static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 reg;
+ int keylen = ctx->keylen;
+ void *key = ctx->key;
+
+ if (keylen) {
+ stm32_hash_set_nblw(hdev, keylen);
+
+ while (keylen > 0) {
+ stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
+ keylen -= 4;
+ key += 4;
+ }
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ u32 reg = HASH_CR_INIT;
+
+ if (!(hdev->flags & HASH_FLAGS_INIT)) {
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ reg |= HASH_CR_ALGO_MD5;
+ break;
+ case HASH_FLAGS_SHA1:
+ reg |= HASH_CR_ALGO_SHA1;
+ break;
+ case HASH_FLAGS_SHA224:
+ reg |= HASH_CR_ALGO_SHA224;
+ break;
+ case HASH_FLAGS_SHA256:
+ reg |= HASH_CR_ALGO_SHA256;
+ break;
+ default:
+ reg |= HASH_CR_ALGO_MD5;
+ }
+
+ reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
+
+ if (rctx->flags & HASH_FLAGS_HMAC) {
+ hdev->flags |= HASH_FLAGS_HMAC;
+ reg |= HASH_CR_MODE;
+ if (ctx->keylen > HASH_LONG_KEY)
+ reg |= HASH_CR_LKEY;
+ }
+
+ stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ hdev->flags |= HASH_FLAGS_INIT;
+
+ dev_dbg(hdev->dev, "Write Control %x\n", reg);
+ }
+}
+
+static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
+{
+ size_t count;
+
+ while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+ count = min(rctx->sg->length - rctx->offset, rctx->total);
+ count = min(count, rctx->buflen - rctx->bufcnt);
+
+ if (count <= 0) {
+ if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
+ rctx->sg = sg_next(rctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
+ rctx->offset, count, 0);
+
+ rctx->bufcnt += count;
+ rctx->offset += count;
+ rctx->total -= count;
+
+ if (rctx->offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ if (rctx->sg)
+ rctx->offset = 0;
+ else
+ rctx->total = 0;
+ }
+ }
+}
+
+static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
+ const u8 *buf, size_t length, int final)
+{
+ unsigned int count, len32;
+ const u32 *buffer = (const u32 *)buf;
+ u32 reg;
+
+ if (final)
+ hdev->flags |= HASH_FLAGS_FINAL;
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ dev_dbg(hdev->dev, "%s: length: %d, final: %x len32 %i\n",
+ __func__, length, final, len32);
+
+ hdev->flags |= HASH_FLAGS_CPU;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
+ if ((hdev->flags & HASH_FLAGS_HMAC) &&
+ !(hdev->flags & HASH_FLAGS_HMAC_KEY)) {
+ hdev->flags |= HASH_FLAGS_HMAC_KEY;
+ stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ }
+
+ for (count = 0; count < len32; count++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[count]);
+
+ if (final) {
+ stm32_hash_set_nblw(hdev, length);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ stm32_hash_write_key(hdev);
+ }
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ int bufcnt, err = 0, final;
+
+ dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+
+ final = (rctx->flags & HASH_FLAGS_FINUP);
+
+ while ((rctx->total >= rctx->buflen) ||
+ (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+ }
+
+ stm32_hash_append_sg(rctx);
+
+ if (final) {
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
+ (rctx->flags & HASH_FLAGS_FINUP));
+ }
+
+ return err;
+}
+
+static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
+ struct scatterlist *sg, int length, int mdma)
+{
+ struct dma_async_tx_descriptor *in_desc;
+ dma_cookie_t cookie;
+ u32 reg;
+ int err;
+
+ in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!in_desc) {
+ dev_err(hdev->dev, "dmaengine_prep_slave error\n");
+ return -ENOMEM;
+ }
+
+ reinit_completion(&hdev->dma_completion);
+ in_desc->callback = stm32_hash_dma_callback;
+ in_desc->callback_param = hdev;
+
+ hdev->flags |= HASH_FLAGS_FINAL;
+ hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
+
+ reg = stm32_hash_read(hdev, HASH_CR);
+
+ if (mdma)
+ reg |= HASH_CR_MDMAT;
+ else
+ reg &= ~HASH_CR_MDMAT;
+
+ reg |= HASH_CR_DMAE;
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ stm32_hash_set_nblw(hdev, length);
+
+ cookie = dmaengine_submit(in_desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ return -ENOMEM;
+
+ dma_async_issue_pending(hdev->dma_lch);
+
+ if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
+ msecs_to_jiffies(100)))
+ err = -ETIMEDOUT;
+
+ if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
+ NULL, NULL) != DMA_COMPLETE)
+ err = -ETIMEDOUT;
+
+ if (err) {
+ dev_err(hdev->dev, "DMA Error %i\n", err);
+ dmaengine_terminate_all(hdev->dma_lch);
+ return err;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void stm32_hash_dma_callback(void *param)
+{
+ struct stm32_hash_dev *hdev = param;
+
+ complete(&hdev->dma_completion);
+
+ hdev->flags |= HASH_FLAGS_DMA_READY;
+}
+
+static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int err;
+
+ if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+ err = stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ } else {
+ if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
+ sg_init_one(&rctx->sg_key, ctx->key,
+ ALIGN(ctx->keylen, sizeof(u32)));
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
+
+ dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
+ }
+
+ return err;
+}
+
+static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
+{
+ struct dma_slave_config dma_conf;
+ int err;
+
+ memset(&dma_conf, 0, sizeof(dma_conf));
+
+ dma_conf.direction = DMA_MEM_TO_DEV;
+ dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
+ dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.src_maxburst = hdev->dma_maxburst;
+ dma_conf.dst_maxburst = hdev->dma_maxburst;
+ dma_conf.device_fc = false;
+
+ hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
+ if (!hdev->dma_lch) {
+ dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+ return -EBUSY;
+ }
+
+ err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
+ if (err) {
+ dma_release_channel(hdev->dma_lch);
+ hdev->dma_lch = NULL;
+ dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+ return err;
+ }
+
+ init_completion(&hdev->dma_completion);
+
+ return 0;
+}
+
+static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct scatterlist sg[1], *tsg;
+ int err = 0, len = 0, reg, ncp;
+ unsigned int i;
+ const u32 *buffer = (const u32 *)rctx->buffer;
+
+ rctx->sg = hdev->req->src;
+ rctx->total = hdev->req->nbytes;
+
+ rctx->nents = sg_nents(rctx->sg);
+
+ if (rctx->nents < 0)
+ return -EINVAL;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ err = stm32_hash_hmac_dma_send(hdev);
+ if (err != -EINPROGRESS)
+ return err;
+ }
+
+ for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
+ len = sg->length;
+
+ if (sg_is_last(sg)) {
+ if (hdev->dma_mode == 1) {
+ len = (ALIGN(sg->length, 16) - 16);
+
+ ncp = sg_pcopy_to_buffer(
+ rctx->sg, rctx->nents,
+ rctx->buffer, sg->length - len,
+ rctx->total - sg->length + len);
+
+ sg->length = len;
+ } else {
+ if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+ len = sg->length;
+ sg->length = ALIGN(sg->length,
+ sizeof(u32));
+ }
+ }
+ }
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, sg, len,
+ !sg_is_last(sg));
+
+ dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ if (err == -ENOMEM)
+ return err;
+ }
+
+ if (hdev->dma_mode == 1) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ reg = stm32_hash_read(hdev, HASH_CR);
+ reg &= ~HASH_CR_DMAE;
+ reg |= HASH_CR_DMAA;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[i]);
+
+ stm32_hash_set_nblw(hdev, ncp);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ err = -EINPROGRESS;
+ }
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ err = stm32_hash_hmac_dma_send(hdev);
+ }
+
+ return err;
+}
+
+static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
+{
+ struct stm32_hash_dev *hdev = NULL, *tmp;
+
+ spin_lock_bh(&stm32_hash.lock);
+ if (!ctx->hdev) {
+ list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
+ hdev = tmp;
+ break;
+ }
+ ctx->hdev = hdev;
+ } else {
+ hdev = ctx->hdev;
+ }
+
+ spin_unlock_bh(&stm32_hash.lock);
+
+ return hdev;
+}
+
+static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int i;
+
+ if (req->nbytes <= HASH_DMA_THRESHOLD)
+ return false;
+
+ if (sg_nents(req->src) > 1) {
+ if (hdev->dma_mode == 1)
+ return false;
+ for_each_sg(req->src, sg, sg_nents(req->src), i) {
+ if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
+ (!sg_is_last(sg)))
+ return false;
+ }
+ }
+
+ if (req->src->offset % 4)
+ return false;
+
+ return true;
+}
+
+static int stm32_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+
+ rctx->hdev = hdev;
+
+ rctx->flags = HASH_FLAGS_CPU;
+
+ rctx->digcnt = crypto_ahash_digestsize(tfm);
+ switch (rctx->digcnt) {
+ case MD5_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_MD5;
+ break;
+ case SHA1_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA1;
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->buflen = HASH_BUFLEN;
+ rctx->total = 0;
+ rctx->offset = 0;
+ rctx->data_type = HASH_DATA_8_BITS;
+
+ memset(rctx->buffer, 0, HASH_BUFLEN);
+
+ if (ctx->flags & HASH_FLAGS_HMAC)
+ rctx->flags |= HASH_FLAGS_HMAC;
+
+ dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+
+ return 0;
+}
+
+static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
+{
+ return stm32_hash_update_cpu(hdev);
+}
+
+static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
+{
+ struct ahash_request *req = hdev->req;
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int err;
+
+ if (!(rctx->flags & HASH_FLAGS_CPU))
+ err = stm32_hash_dma_send(hdev);
+ else
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);
+
+ rctx->bufcnt = 0;
+
+ return err;
+}
+
+static void stm32_hash_copy_hash(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)rctx->digest;
+ unsigned int i, hashsize;
+
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ hashsize = MD5_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA224:
+ hashsize = SHA224_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); i++)
+ hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+ HASH_HREG(i)));
+}
+
+static int stm32_hash_finish(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return -EINVAL;
+
+ memcpy(req->result, rctx->digest, rctx->digcnt);
+
+ return 0;
+}
+
+static void stm32_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
+
+ if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
+ stm32_hash_copy_hash(req);
+ err = stm32_hash_finish(req);
+ hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
+ HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
+ HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
+ HASH_FLAGS_HMAC_KEY);
+ } else {
+ rctx->flags |= HASH_FLAGS_ERRORS;
+ }
+
+ crypto_finalize_hash_request(hdev->engine, req, err);
+}
+
+static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+ struct stm32_hash_request_ctx *rctx)
+{
+ if (!(HASH_FLAGS_INIT & hdev->flags)) {
+ stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+ stm32_hash_write(hdev, HASH_STR, 0);
+ stm32_hash_write(hdev, HASH_DIN, 0);
+ stm32_hash_write(hdev, HASH_IMR, 0);
+ hdev->err = 0;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(hdev->engine, req);
+}
+
+static int stm32_hash_prepare_req(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
+ rctx->op, req->nbytes);
+
+ return stm32_hash_hw_init(hdev, rctx);
+}
+
+static int stm32_hash_one_request(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+ int err = 0;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ if (rctx->op == HASH_OP_UPDATE)
+ err = stm32_hash_update_req(hdev);
+ else if (rctx->op == HASH_OP_FINAL)
+ err = stm32_hash_final_req(hdev);
+
+ if (err != -EINPROGRESS)
+ /* done task will not finish it, so do it here */
+ stm32_hash_finish_req(req, err);
+
+ return 0;
+}
+
+static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct stm32_hash_dev *hdev = ctx->hdev;
+
+ rctx->op = op;
+
+ return stm32_hash_handle_queue(hdev, req);
+}
+
+static int stm32_hash_update(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int ret;
+
+ if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+ return 0;
+
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ return 0;
+ }
+
+ ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
+
+ if (rctx->flags & HASH_FLAGS_FINUP)
+ return ret;
+
+ return 0;
+}
+
+static int stm32_hash_final(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ return stm32_hash_enqueue(req, HASH_OP_FINAL);
+}
+
+static int stm32_hash_finup(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int err1, err2;
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+ rctx->flags &= ~HASH_FLAGS_CPU;
+
+ err1 = stm32_hash_update(req);
+
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+ * final() always has to be called to clean up resources,
+ * even if update() failed, except on -EINPROGRESS
+ */
+ err2 = stm32_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+static int stm32_hash_digest(struct ahash_request *req)
+{
+ return stm32_hash_init(req) ?: stm32_hash_finup(req);
+}
+
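
A note on the a ?: b form used by stm32_hash_finup() and stm32_hash_digest() above: it is the GCC/Clang conditional-with-omitted-middle-operand extension, yielding a when a is nonzero and b otherwise, without evaluating a twice; convenient for propagating the first nonzero error code. A tiny standalone demo (stub return values illustrative):

#include <stdio.h>

static int step1(void) { return 0; }	/* illustrative stubs */
static int step2(void) { return -22; }	/* -EINVAL */

int main(void)
{
	int err1 = step1();
	int err2 = step2();

	/* GNU ?: keeps err1 if nonzero, else falls back to err2 */
	printf("err1 ?: err2 -> %d\n", err1 ?: err2);	/* -22 */
	return 0;
}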
+static int stm32_hash_export(struct ahash_request *req, void *out)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ u32 *preg;
+ unsigned int i;
+
+ while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ cpu_relax();
+
+ rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
+ GFP_KERNEL);
+ if (!rctx->hw_context)
+ return -ENOMEM;
+
+ preg = rctx->hw_context;
+
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int stm32_hash_import(struct ahash_request *req, const void *in)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ const u32 *preg = in;
+ u32 reg;
+ unsigned int i;
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ preg = rctx->hw_context;
+
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+
+ kfree(rctx->hw_context);
+
+ return 0;
+}
+
+static int stm32_hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= HASH_MAX_KEY_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
+ const char *algs_hmac_name)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct stm32_hash_request_ctx));
+
+ ctx->keylen = 0;
+
+ if (algs_hmac_name)
+ ctx->flags |= HASH_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int stm32_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, NULL);
+}
+
+static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "md5");
+}
+
+static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha1");
+}
+
+static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha224");
+}
+
+static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha256");
+}
+
+static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+ int err = 0;
+
+ if (HASH_FLAGS_CPU & hdev->flags) {
+ if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+ goto finish;
+ }
+ } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
+ if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+ goto finish;
+ }
+ }
+
+ return IRQ_HANDLED;
+
+finish:
+ /* Finish the current request */
+ stm32_hash_finish_req(hdev->req, err);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_SR);
+ if (reg & HASH_SR_OUTPUT_READY) {
+ reg &= ~HASH_SR_OUTPUT_READY;
+ stm32_hash_write(hdev, HASH_SR, reg);
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct ahash_alg algs_md5_sha1[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "stm32-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "stm32-hmac-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_md5_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "stm32-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "stm32-hmac-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha1_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct ahash_alg algs_sha224_sha256[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "stm32-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .setkey = stm32_hash_setkey,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "stm32-hmac-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha224_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "stm32-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "stm32-hmac-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha256_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
+ err = crypto_register_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ if (err)
+ goto err_algs;
+ }
+ }
+
+ return 0;
+err_algs:
+	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
+	/* Unwind the partially registered group, then each earlier group */
+	for (; j--;)
+		crypto_unregister_ahash(
+			&hdev->pdata->algs_info[i].algs_list[j]);
+	while (i--) {
+		for (j = hdev->pdata->algs_info[i].size; j--;)
+			crypto_unregister_ahash(
+				&hdev->pdata->algs_info[i].algs_list[j]);
+	}
+
+ return err;
+}
+
+static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
+ crypto_unregister_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ }
+
+ return 0;
+}
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+ .algs_info = stm32_hash_algs_info_stm32f4,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+};
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+ {
+ .algs_list = algs_sha224_sha256,
+ .size = ARRAY_SIZE(algs_sha224_sha256),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+ .algs_info = stm32_hash_algs_info_stm32f7,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+ {
+ .compatible = "st,stm32f456-hash",
+ .data = &stm32_hash_pdata_stm32f4,
+ },
+ {
+ .compatible = "st,stm32f756-hash",
+ .data = &stm32_hash_pdata_stm32f7,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
+
+static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
+ struct device *dev)
+{
+ const struct of_device_id *match;
+ int err;
+
+ match = of_match_device(stm32_hash_of_match, dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ return -EINVAL;
+ }
+
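+	/* dma-maxburst is treated as mandatory here: probe fails without it */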
+ err = of_property_read_u32(dev->of_node, "dma-maxburst",
+ &hdev->dma_maxburst);
+
+ hdev->pdata = match->data;
+
+ return err;
+}
+
+static int stm32_hash_probe(struct platform_device *pdev)
+{
+ struct stm32_hash_dev *hdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, irq;
+
+ hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdev->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdev->io_base))
+ return PTR_ERR(hdev->io_base);
+
+ hdev->phys_base = res->start;
+
+ ret = stm32_hash_get_of_match(hdev, dev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
+ stm32_hash_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), hdev);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ hdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdev->clk)) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ return PTR_ERR(hdev->clk);
+ }
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable hash clock (%d)\n", ret);
+ return ret;
+ }
+
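+	/* If an optional reset line is present, pulse it to reach a known state */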
+ hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(hdev->rst)) {
+ reset_control_assert(hdev->rst);
+ udelay(2);
+ reset_control_deassert(hdev->rst);
+ }
+
+ hdev->dev = dev;
+
+ platform_set_drvdata(pdev, hdev);
+
+ ret = stm32_hash_dma_init(hdev);
+ if (ret)
+ dev_dbg(dev, "DMA mode not available\n");
+
+ spin_lock(&stm32_hash.lock);
+ list_add_tail(&hdev->list, &stm32_hash.dev_list);
+ spin_unlock(&stm32_hash.lock);
+
+ /* Initialize crypto engine */
+ hdev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!hdev->engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+
+ hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
+ hdev->engine->hash_one_request = stm32_hash_one_request;
+
+ ret = crypto_engine_start(hdev->engine);
+ if (ret)
+ goto err_engine_start;
+
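+	/* HWCFGR reflects how this HASH instance was configured, incl. DMA mode */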
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+
+ /* Register algos */
+ ret = stm32_hash_register_algs(hdev);
+ if (ret)
+ goto err_algs;
+
+ dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
+ stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+
+ return 0;
+
+err_algs:
+err_engine_start:
+ crypto_engine_exit(hdev->engine);
+err_engine:
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return ret;
+}
+
+static int stm32_hash_remove(struct platform_device *pdev)
+{
+	struct stm32_hash_dev *hdev;
+
+ hdev = platform_get_drvdata(pdev);
+ if (!hdev)
+ return -ENODEV;
+
+ stm32_hash_unregister_algs(hdev);
+
+ crypto_engine_exit(hdev->engine);
+
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_hash_driver = {
+ .probe = stm32_hash_probe,
+ .remove = stm32_hash_remove,
+ .driver = {
+ .name = "stm32-hash",
+ .of_match_table = stm32_hash_of_match,
+ }
+};
+
+module_platform_driver(stm32_hash_driver);
+
+MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index ec83b1e6bfe8..090582baecfe 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -107,12 +107,12 @@ static int stm32_crc_init(struct shash_desc *desc)
spin_unlock_bh(&crc_list.lock);
/* Reset, set key, poly and configure in bit reverse mode */
- writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl(ctx->crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
return 0;
@@ -135,7 +135,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
- writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR);
+ writel_relaxed(*(u32 *)crc->pending_data,
+ crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
}
@@ -143,10 +144,10 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
- writel(*(d32++), crc->regs + CRC_DR);
+ writel_relaxed(*(d32++), crc->regs + CRC_DR);
/* Store partial result */
- ctx->partial = readl(crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
/* Check for pending data (non 32 bits) */
length &= 3;
@@ -295,7 +296,7 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shash(algs);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
clk_disable_unprepare(crc->clk);
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
index 8f4c7a273141..ccb893219079 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
+sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 02ad8256e900..1547cbe13dc2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -213,6 +213,23 @@ static struct sun4i_ss_alg_template ss_algs[] = {
}
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun4i_ss_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = sun4i_ss_prng_generate,
+ .seed = sun4i_ss_prng_seed,
+ .seedsize = SS_SEED_LEN / BITS_PER_BYTE,
+ }
+},
+#endif
};
static int sun4i_ss_probe(struct platform_device *pdev)
@@ -355,6 +372,13 @@ static int sun4i_ss_probe(struct platform_device *pdev)
goto error_alg;
}
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ err = crypto_register_rng(&ss_algs[i].alg.rng);
+ if (err) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.rng.base.cra_name);
+ }
+ break;
}
}
platform_set_drvdata(pdev, ss);
@@ -369,6 +393,9 @@ error_alg:
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
if (ss->reset)
@@ -393,6 +420,9 @@ static int sun4i_ss_remove(struct platform_device *pdev)
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
new file mode 100644
index 000000000000..0d01d1624252
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -0,0 +1,56 @@
+#include "sun4i-ss.h"
+
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
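+	/* Callers are expected to pass seedsize (SS_SEED_LEN / 8) bytes */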
+ memcpy(algt->ss->seed, seed, slen);
+
+ return 0;
+}
+
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ int i;
+ u32 v;
+ u32 *data = (u32 *)dst;
+ const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
+ size_t len;
+ struct sun4i_ss_ctx *ss;
+ unsigned int todo = (dlen / 4) * 4;
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+ spin_lock(&ss->slock);
+
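+	/* Start the engine: PRNG mode, continuous generation, enabled */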
+ writel(mode, ss->base + SS_CTL);
+
+ while (todo > 0) {
+ /* write the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++)
+ writel(ss->seed[i], ss->base + SS_KEY0 + i * 4);
+
+ /* Read the random data */
+ len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo);
+ readsl(ss->base + SS_TXFIFO, data, len / 4);
+ data += len / 4;
+ todo -= len;
+
+ /* Update the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) {
+ v = readl(ss->base + SS_KEY0 + i * 4);
+ ss->seed[i] = v;
+ }
+ }
+
+ writel(0, ss->base + SS_CTL);
+ spin_unlock(&ss->slock);
+	/* The crypto API expects generate() to return 0 on success */
+	return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index a0e1efc1cb2a..f3ac90692ac6 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
#define SS_CTL 0x00
#define SS_KEY0 0x04
@@ -127,6 +128,9 @@
#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
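+/* PRNG seed and data lengths, in bits */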
+#define SS_SEED_LEN 192
+#define SS_DATA_LEN 160
+
struct sun4i_ss_ctx {
void __iomem *base;
int irq;
@@ -136,6 +140,9 @@ struct sun4i_ss_ctx {
struct device *dev;
struct resource *res;
spinlock_t slock; /* control the use of the device */
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+#endif
};
struct sun4i_ss_alg_template {
@@ -144,6 +151,7 @@ struct sun4i_ss_alg_template {
union {
struct skcipher_alg crypto;
struct ahash_alg hash;
+ struct rng_alg rng;
} alg;
struct sun4i_ss_ctx *ss;
};
@@ -201,3 +209,6 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 49defda4e03d..5035b0dc1e40 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -27,12 +27,68 @@
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+ struct virtio_crypto_request base;
+
+ /* Cipher or aead */
+ uint32_t type;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ uint8_t *iv;
+ /* Encryption? */
+ bool encrypt;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_active_devs
* and crypto algorithms registion.
*/
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
+ struct ablkcipher_request *req,
+ int err);
+
+static void virtio_crypto_dataq_sym_callback
+ (struct virtio_crypto_request *vc_req, int len)
+{
+ struct virtio_crypto_sym_request *vc_sym_req =
+ container_of(vc_req, struct virtio_crypto_sym_request, base);
+ struct ablkcipher_request *ablk_req;
+ int error;
+
+ /* Finish the encrypt or decrypt process */
+ if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_sym_req->ablkcipher_req;
+ virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req, error);
+ }
+}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
@@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
struct data_queue *data_vq)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
}
vc_req->req_data = req_data;
- vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */
- if (vc_req->encrypt) {
+ if (vc_sym_req->encrypt) {
req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode =
@@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
memcpy(iv, req->info, ivsize);
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
- vc_req->iv = iv;
+ vc_sym_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
@@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
- vc_req->encrypt = true;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = true;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
-
- vc_req->encrypt = false;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = false;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm;
return 0;
@@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req)
{
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
+ ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
@@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
int err)
{
- crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);
-
- virtcrypto_clear_request(vc_req);
+ crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
+ kzfree(vc_sym_req->iv);
+ virtcrypto_clear_request(&vc_sym_req->base);
}
static struct crypto_alg virtio_crypto_algs[] = { {
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index da6d8c0ea407..e976539a05d9 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};
-struct virtio_crypto_ablkcipher_ctx {
- struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
-
- struct virtio_crypto_sym_session_info enc_sess_info;
- struct virtio_crypto_sym_session_info dec_sess_info;
-};
+struct virtio_crypto_request;
+typedef void (*virtio_crypto_data_callback)
+ (struct virtio_crypto_request *vc_req, int len);
struct virtio_crypto_request {
- /* Cipher or aead */
- uint32_t type;
uint8_t status;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
struct virtio_crypto_op_data_req *req_data;
struct scatterlist **sgs;
- uint8_t *iv;
- /* Encryption? */
- bool encrypt;
struct data_queue *dataq;
+ virtio_crypto_data_callback alg_cb;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
@@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req);
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
- struct ablkcipher_request *req,
- int err);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index a111cd72797b..ff1410a32c2b 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -29,7 +29,6 @@ void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
if (vc_req) {
- kzfree(vc_req->iv);
kzfree(vc_req->req_data);
kfree(vc_req->sgs);
}
@@ -41,40 +40,18 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
- struct ablkcipher_request *ablk_req;
- int error;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
- switch (vc_req->status) {
- case VIRTIO_CRYPTO_OK:
- error = 0;
- break;
- case VIRTIO_CRYPTO_INVSESS:
- case VIRTIO_CRYPTO_ERR:
- error = -EINVAL;
- break;
- case VIRTIO_CRYPTO_BADMSG:
- error = -EBADMSG;
- break;
- default:
- error = -EIO;
- break;
- }
- ablk_req = vc_req->ablkcipher_req;
-
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
- /* Finish the encrypt or decrypt process */
- virtio_crypto_ablkcipher_finalize_req(vc_req,
- ablk_req, error);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
- }
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
@@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
return -EPERM;
}
- dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
} else {
virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 9c26d9e8dbea..17d84217dd76 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -104,8 +104,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_enable();
preempt_enable();
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 938eb4868f7f..557b93703532 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -46,6 +46,8 @@ void dax_read_unlock(int id)
EXPORT_SYMBOL_GPL(dax_read_unlock);
#ifdef CONFIG_BLOCK
+#include <linux/blkdev.h>
+
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
pgoff_t *pgoff)
{
@@ -59,6 +61,16 @@ int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
}
EXPORT_SYMBOL(bdev_dax_pgoff);
+#if IS_ENABLED(CONFIG_FS_DAX)
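+/* Look up the dax_device behind @bdev; NULL if its queue does not advertise DAX */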
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+{
+ if (!blk_queue_dax(bdev->bd_queue))
+ return NULL;
+ return fs_dax_get_by_host(bdev->bd_disk->disk_name);
+}
+EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+#endif
+
/**
* __bdev_dax_supported() - Check if the device supports dax for filesystem
* @sb: The superblock of the device
@@ -189,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
if (!dax_dev)
return 0;
- if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
+#ifndef CONFIG_ARCH_HAS_PMEM_API
+ if (a == &dev_attr_write_cache.attr)
return 0;
+#endif
return a->mode;
}
@@ -255,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t size)
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
- if (!dax_alive(dax_dev))
+ if (unlikely(!dax_alive(dax_dev)))
return;
- if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
+ if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
return;
- if (dax_dev->ops->flush)
- dax_dev->ops->flush(dax_dev, pgoff, addr, size);
+ arch_wb_cache_pmem(addr, size);
}
+#else
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
+{
+}
+#endif
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 41254e702f1e..6a172d338f6d 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,6 +1,7 @@
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
select SRCU
+ select PM_OPP
help
A device may have a list of frequencies and voltages available.
devfreq, a generic DVFS framework can be registered for a device
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 8648b32ebc89..d67242d87744 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -277,8 +277,8 @@ int devfreq_event_get_edev_count(struct device *dev)
sizeof(u32));
if (count < 0) {
dev_err(dev,
- "failed to get the count of devfreq-event in %s node\n",
- dev->of_node->full_name);
+ "failed to get the count of devfreq-event in %pOF node\n",
+ dev->of_node);
return count;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index dea04871b50d..a1c4ee818614 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_out;
+ goto err_dev;
}
devfreq->trans_table = devm_kzalloc(&devfreq->dev,
@@ -610,6 +610,9 @@ err_init:
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+err_dev:
+	kfree(devfreq);
err_out:
return ERR_PTR(err);
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index a4f2fa1091e4..cfc50a61a90d 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -69,4 +69,8 @@ extern int devfreq_remove_governor(struct devfreq_governor *governor);
extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
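+/* Helper for governors: refresh df->last_status via the driver's get_dev_status() */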
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
#endif /* _GOVERNOR_H */
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 56e0a0e1b600..9a302799040e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -48,7 +48,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
*/
u64 dma_fence_context_alloc(unsigned num)
{
- BUG_ON(!num);
+ WARN_ON(!num);
return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);
@@ -172,7 +172,7 @@ void dma_fence_release(struct kref *kref)
trace_dma_fence_destroy(fence);
- BUG_ON(!list_empty(&fence->cb_list));
+ WARN_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 393817e849ed..dec3a815455d 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -195,8 +195,7 @@ done:
if (old)
kfree_rcu(old, rcu);
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -258,12 +257,71 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
/**
+ * reservation_object_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. Both src->lock and dst->lock must be
+ * held.
+ */
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src)
+{
+ struct reservation_object_list *src_list, *dst_list;
+ struct dma_fence *old, *new;
+ size_t size;
+ unsigned i;
+
+ src_list = reservation_object_get_list(src);
+
+ if (src_list) {
+ size = offsetof(typeof(*src_list),
+ shared[src_list->shared_count]);
+ dst_list = kmalloc(size, GFP_KERNEL);
+ if (!dst_list)
+ return -ENOMEM;
+
+ dst_list->shared_count = src_list->shared_count;
+ dst_list->shared_max = src_list->shared_count;
+ for (i = 0; i < src_list->shared_count; ++i)
+ dst_list->shared[i] =
+ dma_fence_get(src_list->shared[i]);
+ } else {
+ dst_list = NULL;
+ }
+
+ kfree(dst->staged);
+ dst->staged = NULL;
+
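+	/* From here on, src_list temporarily holds dst's old shared list */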
+ src_list = reservation_object_get_list(dst);
+
+ old = reservation_object_get_excl(dst);
+ new = reservation_object_get_excl(src);
+
+ dma_fence_get(new);
+
+ preempt_disable();
+ write_seqcount_begin(&dst->seq);
+ /* write_seqcount_begin provides the necessary memory barrier */
+ RCU_INIT_POINTER(dst->fence_excl, new);
+ RCU_INIT_POINTER(dst->fence, dst_list);
+ write_seqcount_end(&dst->seq);
+ preempt_enable();
+
+ if (src_list)
+ kfree_rcu(src_list, rcu);
+ dma_fence_put(old);
+
+ return 0;
+}
+EXPORT_SYMBOL(reservation_object_copy_fences);
+
+/**
* reservation_object_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
@@ -373,12 +431,25 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
long ret = timeout ? timeout : 1;
retry:
- fence = NULL;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
- if (wait_all) {
+ fence = rcu_dereference(obj->fence_excl);
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (!dma_fence_get_rcu(fence))
+ goto unlock_retry;
+
+ if (dma_fence_is_signaled(fence)) {
+ dma_fence_put(fence);
+ fence = NULL;
+ }
+
+ } else {
+ fence = NULL;
+ }
+
+ if (!fence && wait_all) {
struct reservation_object_list *fobj =
rcu_dereference(obj->fence);
@@ -405,22 +476,6 @@ retry:
}
}
- if (!shared_count) {
- struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (fence_excl &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence_excl->flags)) {
- if (!dma_fence_get_rcu(fence_excl))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence_excl))
- dma_fence_put(fence_excl);
- else
- fence = fence_excl;
- }
- }
-
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 69c5ff36e2f9..38cc7389a6c1 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -96,9 +96,9 @@ static struct sync_timeline *sync_timeline_create(const char *name)
obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
+ obj->pt_tree = RB_ROOT;
+ INIT_LIST_HEAD(&obj->pt_list);
+ spin_lock_init(&obj->lock);
sync_timeline_debug_add(obj);
@@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj)
kref_put(&obj->kref, sync_timeline_free);
}
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- * @inc: num to increment on timeline->value
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
-{
- unsigned long flags;
- struct sync_pt *pt, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- obj->value += inc;
-
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (dma_fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: fence's parent sync_timeline
- * @size: size to allocate for this pt
- * @inc: value of the fence
- *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the sync_pt object or
- * NULL in case of error.
- */
-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
- unsigned int value)
-{
- unsigned long flags;
- struct sync_pt *pt;
-
- if (size < sizeof(*pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (!pt)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
-}
-
static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
@@ -203,13 +141,17 @@ static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
- unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (!list_empty(&pt->active_list))
- list_del(&pt->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+ list_del(&pt->link);
+ rb_erase(&pt->node, &parent->pt_tree);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+ }
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return (fence->seqno > parent->value) ? false : true;
+ return !__dma_fence_is_later(fence->seqno, parent->value);
}
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = dma_fence_to_sync_pt(fence);
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- if (timeline_fence_signaled(fence))
- return false;
-
- list_add_tail(&pt->active_list, &parent->active_list_head);
return true;
}
@@ -259,6 +194,107 @@ static const struct dma_fence_ops timeline_fence_ops = {
.timeline_value_str = timeline_fence_timeline_value_str,
};
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irq(&obj->lock);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ if (!timeline_fence_signaled(&pt->base))
+ break;
+
+ list_del_init(&pt->link);
+ rb_erase(&pt->node, &obj->pt_tree);
+
+ /*
+ * A signal callback may release the last reference to this
+ * fence, causing it to be freed. That operation has to be
+ * last to avoid a use after free inside this loop, and must
+ * be after we remove the fence from the timeline in order to
+ * prevent deadlocking on timeline->lock inside
+ * timeline_fence_release().
+ */
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj: parent sync_timeline
+ * @value: value of the fence
+ *
+ * Creates a new sync_pt (fence) as a child of @obj, or reuses an
+ * existing point with the same value. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+ unsigned int value)
+{
+ struct sync_pt *pt;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ sync_timeline_get(obj);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+ obj->context, value);
+ INIT_LIST_HEAD(&pt->link);
+
+ spin_lock_irq(&obj->lock);
+ if (!dma_fence_is_signaled_locked(&pt->base)) {
+ struct rb_node **p = &obj->pt_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p) {
+ struct sync_pt *other;
+ int cmp;
+
+ parent = *p;
+ other = rb_entry(parent, typeof(*pt), node);
+ cmp = value - other->base.seqno;
+ if (cmp > 0) {
+ p = &parent->rb_right;
+ } else if (cmp < 0) {
+ p = &parent->rb_left;
+ } else {
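+				/* A point with this value already exists; reuse it if still referenced */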
+ if (dma_fence_get_rcu(&other->base)) {
+ dma_fence_put(&pt->base);
+ pt = other;
+ goto unlock;
+ }
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&pt->node, parent, p);
+ rb_insert_color(&pt->node, &obj->pt_tree);
+
+ parent = rb_next(&pt->node);
+ list_add_tail(&pt->link,
+ parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+ }
+unlock:
+ spin_unlock_irq(&obj->lock);
+
+ return pt;
+}
+
/*
* *WARNING*
*
@@ -309,7 +345,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
goto err;
}
- pt = sync_pt_create(obj, sizeof(*pt), data.value);
+ pt = sync_pt_create(obj, data.value);
if (!pt) {
err = -ENOMEM;
goto err;
@@ -345,6 +381,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
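+	/* Fence seqnos compare as 31-bit deltas, so signal in INT_MAX sized steps */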
+ while (value > INT_MAX) {
+ sync_timeline_signal(obj, INT_MAX);
+ value -= INT_MAX;
+ }
+
sync_timeline_signal(obj, value);
return 0;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 59a3b2f8ee91..c4c8ecb24aa9 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s,
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
struct list_head *pos;
- unsigned long flags;
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
+ spin_lock_irq(&obj->lock);
+ list_for_each(pos, &obj->pt_list) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
+ spin_unlock_irq(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
@@ -151,12 +149,11 @@ static void sync_print_sync_file(struct seq_file *s,
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
- unsigned long flags;
struct list_head *pos;
seq_puts(s, "objs:\n--------------\n");
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ spin_lock_irq(&sync_timeline_list_lock);
list_for_each(pos, &sync_timeline_list_head) {
struct sync_timeline *obj =
container_of(pos, struct sync_timeline,
@@ -165,11 +162,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
sync_print_obj(s, obj);
seq_putc(s, '\n');
}
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ spin_unlock_irq(&sync_timeline_list_lock);
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irqsave(&sync_file_list_lock, flags);
+ spin_lock_irq(&sync_file_list_lock);
list_for_each(pos, &sync_file_list_head) {
struct sync_file *sync_file =
container_of(pos, struct sync_file, sync_file_list);
@@ -177,7 +174,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
sync_print_sync_file(s, sync_file);
seq_putc(s, '\n');
}
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
+ spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 26fe8b9907b3..d615a89f774c 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -14,6 +14,7 @@
#define _LINUX_SYNC_H
#include <linux/list.h>
+#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/dma-fence.h>
@@ -24,42 +25,41 @@
* struct sync_timeline - sync object
* @kref: reference count on fence.
* @name: name of the sync_timeline. Useful for debugging
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head and fence.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @lock: lock protecting @pt_list and @value
+ * @pt_tree: rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list: list of active (unsignaled/errored) sync_pts
* @sync_timeline_list: membership in global sync_timeline_list
*/
struct sync_timeline {
struct kref kref;
char name[32];
- /* protected by child_list_lock */
+ /* protected by lock */
u64 context;
int value;
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
+ struct rb_root pt_tree;
+ struct list_head pt_list;
+ spinlock_t lock;
struct list_head sync_timeline_list;
};
static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline, child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, lock);
}
/**
* struct sync_pt - sync_pt object
* @base: base fence object
- * @child_list: sync timeline child's list
- * @active_list: sync timeline active child's list
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
*/
struct sync_pt {
struct dma_fence base;
- struct list_head child_list;
- struct list_head active_list;
+ struct list_head link;
+ struct rb_node node;
};
#ifdef CONFIG_SW_SYNC
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fa8f9c07ce73..fadc4d8783bd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -56,6 +56,12 @@ config DMA_OF
select DMA_ENGINE
#devices
+config ALTERA_MSGDMA
+ tristate "Altera / Intel mSGDMA Engine"
+ select DMA_ENGINE
+ help
+ Enable support for Altera / Intel mSGDMA controller.
+
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index d12ab2985ed1..f08f8de1b567 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
#devices
+obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
new file mode 100644
index 000000000000..32905d5606ac
--- /dev/null
+++ b/drivers/dma/altera-msgdma.c
@@ -0,0 +1,927 @@
+/*
+ * DMA driver for Altera mSGDMA IP core
+ *
+ * Copyright (C) 2017 Stefan Roese <sr@denx.de>
+ *
+ * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+
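+/* Maximum bytes per hardware descriptor, and size of the sw descriptor pool */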
+#define MSGDMA_MAX_TRANS_LEN U32_MAX
+#define MSGDMA_DESC_NUM 1024
+
+/**
+ * struct msgdma_extended_desc - implements an extended descriptor
+ * @read_addr_lo: data buffer source address low bits
+ * @write_addr_lo: data buffer destination address low bits
+ * @len: the number of bytes to transfer per descriptor
+ * @burst_seq_num: bit 31:24 write burst
+ * bit 23:16 read burst
+ * bit 15:00 sequence number
+ * @stride: bit 31:16 write stride
+ * bit 15:00 read stride
+ * @read_addr_hi: data buffer source address high bits
+ * @write_addr_hi: data buffer destination address high bits
+ * @control: characteristics of the transfer
+ */
+struct msgdma_extended_desc {
+ u32 read_addr_lo;
+ u32 write_addr_lo;
+ u32 len;
+ u32 burst_seq_num;
+ u32 stride;
+ u32 read_addr_hi;
+ u32 write_addr_hi;
+ u32 control;
+};
+
+/* mSGDMA descriptor control field bit definitions */
+#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
+#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
+#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
+#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
+#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
+#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
+#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
+#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
+#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
+#define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16)
+#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
+
+/*
+ * Writing "1" the "go" bit commits the entire descriptor into the
+ * descriptor FIFO(s)
+ */
+#define MSGDMA_DESC_CTL_GO BIT(31)
+
+/* Tx buffer control flags */
+#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
+ MSGDMA_DESC_CTL_END_ON_LEN | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_EARLY_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+/* mSGDMA extended descriptor stride definitions */
+#define MSGDMA_DESC_STRIDE_RD 0x00000001
+#define MSGDMA_DESC_STRIDE_WR 0x00010000
+#define MSGDMA_DESC_STRIDE_RW 0x00010001
+
+/* mSGDMA dispatcher control and status register map */
+#define MSGDMA_CSR_STATUS 0x00 /* Read / Clear */
+#define MSGDMA_CSR_CONTROL 0x04 /* Read / Write */
+#define MSGDMA_CSR_RW_FILL_LEVEL 0x08 /* 31:16 - write fill level */
+ /* 15:00 - read fill level */
+#define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c /* response FIFO fill level */
+#define MSGDMA_CSR_RW_SEQ_NUM 0x10 /* 31:16 - write seq number */
+ /* 15:00 - read seq number */
+
+/* mSGDMA CSR status register bit definitions */
+#define MSGDMA_CSR_STAT_BUSY BIT(0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
+#define MSGDMA_CSR_STAT_STOPPED BIT(5)
+#define MSGDMA_CSR_STAT_RESETTING BIT(6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
+#define MSGDMA_CSR_STAT_IRQ BIT(9)
+#define MSGDMA_CSR_STAT_MASK GENMASK(9, 0)
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0)
+
+#define DESC_EMPTY (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
+ MSGDMA_CSR_STAT_RESP_BUF_EMPTY)
+
+/* mSGDMA CSR control register bit definitions */
+#define MSGDMA_CSR_CTL_STOP BIT(0)
+#define MSGDMA_CSR_CTL_RESET BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
+
+/* mSGDMA CSR fill level bits */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+
+#define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16)
+
+/* mSGDMA response register map */
+#define MSGDMA_RESP_BYTES_TRANSFERRED 0x00
+#define MSGDMA_RESP_STATUS 0x04
+
+/* mSGDMA response register bit definitions */
+#define MSGDMA_RESP_EARLY_TERM BIT(8)
+#define MSGDMA_RESP_ERR_MASK 0xff
+
+/**
+ * struct msgdma_sw_desc - implements a sw descriptor
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @node: node on the free/pending descriptor lists
+ * @tx_list: list of chained sw descriptors belonging to this transaction
+ */
+struct msgdma_sw_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct msgdma_extended_desc hw_desc;
+ struct list_head node;
+ struct list_head tx_list;
+};
+
+/**
+ * struct msgdma_device - DMA device structure
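+ * @lock: protects the descriptor lists and the free-descriptor count
+ * @dev: underlying struct device
+ * @irq_tasklet: bottom half that completes finished descriptors
+ * @pending_list: descriptors submitted but not yet issued to the HW
+ * @free_list: pool of unused sw descriptors
+ * @active_list: descriptors currently queued in the controller FIFO
+ * @done_list: descriptors completed by the HW, awaiting cleanup
+ * @desc_free_cnt: number of descriptors left on @free_list
+ * @idle: true while no transfer is in flight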
+ */
+struct msgdma_device {
+ spinlock_t lock;
+ struct device *dev;
+ struct tasklet_struct irq_tasklet;
+ struct list_head pending_list;
+ struct list_head free_list;
+ struct list_head active_list;
+ struct list_head done_list;
+ u32 desc_free_cnt;
+ bool idle;
+
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
+ dma_addr_t hw_desq;
+ struct msgdma_sw_desc *sw_desq;
+ unsigned int npendings;
+
+ struct dma_slave_config slave_cfg;
+
+ int irq;
+
+ /* mSGDMA controller */
+ void __iomem *csr;
+
+ /* mSGDMA descriptors */
+ void __iomem *desc;
+
+ /* mSGDMA response */
+ void __iomem *resp;
+};
+
+#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan)
+#define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
+
+/**
+ * msgdma_get_descriptor - Get the sw descriptor from the pool
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ *
+ * Return: The sw descriptor
+ */
+static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc;
+
+ spin_lock_bh(&mdev->lock);
+ desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
+ list_del(&desc->node);
+ spin_unlock_bh(&mdev->lock);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+
+ return desc;
+}
+
+/**
+ * msgdma_free_descriptor - Return a descriptor and its children to the free list
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @desc: Transaction descriptor pointer
+ */
+static void msgdma_free_descriptor(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ struct msgdma_sw_desc *child, *next;
+
+ mdev->desc_free_cnt++;
+ list_add_tail(&desc->node, &mdev->free_list);
+ list_for_each_entry_safe(child, next, &desc->tx_list, node) {
+ mdev->desc_free_cnt++;
+ list_move_tail(&child->node, &mdev->free_list);
+ }
+}
+
+/**
+ * msgdma_free_desc_list - Free descriptors list
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @list: list of descriptors to move back to the free list
+ */
+static void msgdma_free_desc_list(struct msgdma_device *mdev,
+ struct list_head *list)
+{
+ struct msgdma_sw_desc *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node)
+ msgdma_free_descriptor(mdev, desc);
+}
+
+/**
+ * msgdma_desc_config - Configure the descriptor
+ * @desc: Hw descriptor pointer
+ * @dst: Destination buffer address
+ * @src: Source buffer address
+ * @len: Transfer length
+ */
+static void msgdma_desc_config(struct msgdma_extended_desc *desc,
+ dma_addr_t dst, dma_addr_t src, size_t len,
+ u32 stride)
+{
+ /* Set lower 32bits of src & dst addresses in the descriptor */
+ desc->read_addr_lo = lower_32_bits(src);
+ desc->write_addr_lo = lower_32_bits(dst);
+
+ /* Set upper 32bits of src & dst addresses in the descriptor */
+ desc->read_addr_hi = upper_32_bits(src);
+ desc->write_addr_hi = upper_32_bits(dst);
+
+ desc->len = len;
+ desc->stride = stride;
+ desc->burst_seq_num = 0; /* 0 will result in max burst length */
+
+ /*
+	 * Don't set the interrupt on transfer end yet; this will be done later
+ * for the "last" descriptor
+ */
+ desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
+ MSGDMA_DESC_CTL_END_ON_LEN;
+}
+
+/**
+ * msgdma_desc_config_eod - Mark the descriptor as end descriptor
+ * @desc: Hw descriptor pointer
+ */
+static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
+{
+ desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
+}
+
+/**
+ * msgdma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct msgdma_device *mdev = to_mdev(tx->chan);
+ struct msgdma_sw_desc *new;
+ dma_cookie_t cookie;
+
+ new = tx_to_desc(tx);
+ spin_lock_bh(&mdev->lock);
+ cookie = dma_cookie_assign(tx);
+
+ list_add_tail(&new->node, &mdev->pending_list);
+ spin_unlock_bh(&mdev->lock);
+
+ return cookie;
+}
+
+/**
+ * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, ulong flags)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *new, *first = NULL;
+ struct msgdma_extended_desc *desc;
+ size_t copy;
+ u32 desc_cnt;
+
+ desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&mdev->lock);
+ if (desc_cnt > mdev->desc_free_cnt) {
+ spin_unlock_bh(&mdev->lock);
+ dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
+ return NULL;
+ }
+ mdev->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&mdev->lock);
+
+ do {
+ /* Allocate and populate the descriptor */
+ new = msgdma_get_descriptor(mdev);
+
+ copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
+ desc = &new->hw_desc;
+ msgdma_desc_config(desc, dma_dst, dma_src, copy,
+ MSGDMA_DESC_STRIDE_RW);
+ len -= copy;
+ dma_src += copy;
+ dma_dst += copy;
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ msgdma_desc_config_eod(desc);
+ async_tx_ack(&first->async_tx);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+}
+
+/**
+ * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
+ *
+ * @dchan: DMA channel
+ * @sgl: Destination scatter list
+ * @sg_len: Number of entries in destination scatter list
+ * @dir: DMA transfer direction
+ * @flags: transfer ack flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *
+msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct dma_slave_config *cfg = &mdev->slave_cfg;
+ struct msgdma_sw_desc *new, *first = NULL;
+ void *desc = NULL;
+ size_t len, avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+ u32 stride;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&mdev->lock);
+ if (desc_cnt > mdev->desc_free_cnt) {
+ spin_unlock_bh(&mdev->lock);
+ dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
+ return NULL;
+ }
+ mdev->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&mdev->lock);
+
+ avail = sg_dma_len(sgl);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = msgdma_get_descriptor(mdev);
+
+ desc = &new->hw_desc;
+ len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
+ dma_dst = cfg->dst_addr;
+ stride = MSGDMA_DESC_STRIDE_RD;
+ } else {
+ dma_src = cfg->src_addr;
+ dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
+ stride = MSGDMA_DESC_STRIDE_WR;
+ }
+ msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
+ avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+
+ /* Fetch the next scatterlist entry */
+ if (avail == 0) {
+ if (sg_len == 0)
+ break;
+ sgl = sg_next(sgl);
+ if (sgl == NULL)
+ break;
+ sg_len--;
+ avail = sg_dma_len(sgl);
+ }
+ }
+
+ msgdma_desc_config_eod(desc);
+ first->async_tx.flags = flags;
+
+ return &first->async_tx;
+}
+
+static int msgdma_dma_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+
+ memcpy(&mdev->slave_cfg, config, sizeof(*config));
+
+ return 0;
+}
+
+static void msgdma_reset(struct msgdma_device *mdev)
+{
+ u32 val;
+ int ret;
+
+ /* Reset mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
+ iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);
+
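+ /* Wait up to 10 ms for the RESETTING bit to self-clear */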
+ ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
+ (val & MSGDMA_CSR_STAT_RESETTING) == 0,
+ 1, 10000);
+ if (ret)
+ dev_err(mdev->dev, "DMA channel did not reset\n");
+
+ /* Clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
+
+ /* Enable the DMA controller including interrupts */
+ iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
+ MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);
+
+ mdev->idle = true;
+}
+
+static void msgdma_copy_one(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ void __iomem *hw_desc = mdev->desc;
+
+ /*
+ * Check that the DESC FIFO is not full. If it is full, wait
+ * for at least one entry to become free again.
+ */
+ while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
+ MSGDMA_CSR_STAT_DESC_BUF_FULL)
+ mdelay(1);
+
+ /*
+ * The descriptor needs to be copied into the descriptor FIFO
+ * of the DMA controller. The descriptor is flushed to the
+ * FIFO once the last word (the control word) is written. Since
+ * we are not 100% sure that memcpy() writes all words in the
+ * "correct" order (addresses from low to high) on all
+ * architectures, we make sure the control word is written last
+ * by writing it separately and adding write barriers here.
+ */
+ memcpy((void __force *)hw_desc, &desc->hw_desc,
+ sizeof(desc->hw_desc) - sizeof(u32));
+
+ /* Write control word last to flush this descriptor into the FIFO */
+ mdev->idle = false;
+ wmb();
+ iowrite32(desc->hw_desc.control, hw_desc +
+ offsetof(struct msgdma_extended_desc, control));
+ wmb();
+}
+
+/**
+ * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ * @desc: Transaction descriptor pointer
+ */
+static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *desc)
+{
+ struct msgdma_sw_desc *sdesc, *next;
+
+ msgdma_copy_one(mdev, desc);
+
+ list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
+ msgdma_copy_one(mdev, sdesc);
+}
+
+/**
+ * msgdma_start_transfer - Initiate the new transfer
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_start_transfer(struct msgdma_device *mdev)
+{
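+ /* Called with mdev->lock held */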
+ struct msgdma_sw_desc *desc;
+
+ if (!mdev->idle)
+ return;
+
+ desc = list_first_entry_or_null(&mdev->pending_list,
+ struct msgdma_sw_desc, node);
+ if (!desc)
+ return;
+
+ list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
+ msgdma_copy_desc_to_fifo(mdev, desc);
+}
+
+/**
+ * msgdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ */
+static void msgdma_issue_pending(struct dma_chan *chan)
+{
+ struct msgdma_device *mdev = to_mdev(chan);
+
+ spin_lock_bh(&mdev->lock);
+ msgdma_start_transfer(mdev);
+ spin_unlock_bh(&mdev->lock);
+}
+
+/**
+ * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc, *next;
+
+ list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
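+ /* Drop the lock across the callback; it may call back into the driver */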
+ if (callback) {
+ spin_unlock(&mdev->lock);
+ callback(callback_param);
+ spin_lock(&mdev->lock);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ msgdma_free_descriptor(mdev, desc);
+ }
+}
+
+/**
+ * msgdma_complete_descriptor - Mark the active descriptor as complete
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_complete_descriptor(struct msgdma_device *mdev)
+{
+ struct msgdma_sw_desc *desc;
+
+ desc = list_first_entry_or_null(&mdev->active_list,
+ struct msgdma_sw_desc, node);
+ if (!desc)
+ return;
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &mdev->done_list);
+}
+
+/**
+ * msgdma_free_descriptors - Free channel descriptors
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_free_descriptors(struct msgdma_device *mdev)
+{
+ msgdma_free_desc_list(mdev, &mdev->active_list);
+ msgdma_free_desc_list(mdev, &mdev->pending_list);
+ msgdma_free_desc_list(mdev, &mdev->done_list);
+}
+
+/**
+ * msgdma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void msgdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+
+ spin_lock_bh(&mdev->lock);
+ msgdma_free_descriptors(mdev);
+ spin_unlock_bh(&mdev->lock);
+ kfree(mdev->sw_desq);
+}
+
+/**
+ * msgdma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and a negative error code on failure
+ */
+static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct msgdma_device *mdev = to_mdev(dchan);
+ struct msgdma_sw_desc *desc;
+ int i;
+
+ mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
+ if (!mdev->sw_desq)
+ return -ENOMEM;
+
+ mdev->idle = true;
+ mdev->desc_free_cnt = MSGDMA_DESC_NUM;
+
+ INIT_LIST_HEAD(&mdev->free_list);
+
+ for (i = 0; i < MSGDMA_DESC_NUM; i++) {
+ desc = mdev->sw_desq + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
+ desc->async_tx.tx_submit = msgdma_tx_submit;
+ list_add_tail(&desc->node, &mdev->free_list);
+ }
+
+ return MSGDMA_DESC_NUM;
+}
+
+/**
+ * msgdma_tasklet - Tasklet to handle transfer completions
+ * @data: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_tasklet(unsigned long data)
+{
+ struct msgdma_device *mdev = (struct msgdma_device *)data;
+ u32 count;
+ u32 __maybe_unused size;
+ u32 __maybe_unused status;
+
+ spin_lock(&mdev->lock);
+
+ /* Read number of responses that are available */
+ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+
+ while (count--) {
+ /*
+ * Read both longwords to purge this response from the FIFO.
+ * On Avalon-MM implementations, size and status do not
+ * carry any real values, such as transferred bytes or
+ * error bits, so these values are simply dropped.
+ */
+ size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
+ status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
+
+ msgdma_complete_descriptor(mdev);
+ msgdma_chan_desc_cleanup(mdev);
+ }
+
+ spin_unlock(&mdev->lock);
+}
+
+/**
+ * msgdma_irq_handler - Altera mSGDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Altera mSGDMA device structure
+ *
+ * Return: Always IRQ_HANDLED
+ */
+static irqreturn_t msgdma_irq_handler(int irq, void *data)
+{
+ struct msgdma_device *mdev = data;
+ u32 status;
+
+ status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
+ if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
+ /* Start next transfer if the DMA controller is idle */
+ spin_lock(&mdev->lock);
+ mdev->idle = true;
+ msgdma_start_transfer(mdev);
+ spin_unlock(&mdev->lock);
+ }
+
+ tasklet_schedule(&mdev->irq_tasklet);
+
+ /* Clear interrupt in mSGDMA controller */
+ iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * msgdma_dev_remove - Device remove function
+ * @mdev: Pointer to the Altera mSGDMA device structure
+ */
+static void msgdma_dev_remove(struct msgdma_device *mdev)
+{
+ if (!mdev)
+ return;
+
+ devm_free_irq(mdev->dev, mdev->irq, mdev);
+ tasklet_kill(&mdev->irq_tasklet);
+ list_del(&mdev->dmachan.device_node);
+}
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+ struct resource **res, void __iomem **ptr)
+{
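+ /* Look up a named MMIO resource, reserve the region, and ioremap it */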
+ struct resource *region;
+ struct device *device = &pdev->dev;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (*res == NULL) {
+ dev_err(device, "resource %s not defined\n", name);
+ return -ENODEV;
+ }
+
+ region = devm_request_mem_region(device, (*res)->start,
+ resource_size(*res), dev_name(device));
+ if (region == NULL) {
+ dev_err(device, "unable to request %s\n", name);
+ return -EBUSY;
+ }
+
+ *ptr = devm_ioremap_nocache(device, region->start,
+ resource_size(region));
+ if (*ptr == NULL) {
+ dev_err(device, "ioremap_nocache of %s failed!\n", name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * msgdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int msgdma_probe(struct platform_device *pdev)
+{
+ struct msgdma_device *mdev;
+ struct dma_device *dma_dev;
+ struct resource *dma_res;
+ int ret;
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->dev = &pdev->dev;
+
+ /* Map CSR space */
+ ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
+ if (ret)
+ return ret;
+
+ /* Map (extended) descriptor space */
+ ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
+ if (ret)
+ return ret;
+
+ /* Map response space */
+ ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+
+ /* Get interrupt number from the platform device */
+ mdev->irq = platform_get_irq(pdev, 0);
+ if (mdev->irq < 0)
+ return -ENXIO;
+
+ ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
+ 0, dev_name(&pdev->dev), mdev);
+ if (ret)
+ return ret;
+
+ tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
+
+ dma_cookie_init(&mdev->dmachan);
+
+ spin_lock_init(&mdev->lock);
+
+ INIT_LIST_HEAD(&mdev->active_list);
+ INIT_LIST_HEAD(&mdev->pending_list);
+ INIT_LIST_HEAD(&mdev->done_list);
+ INIT_LIST_HEAD(&mdev->free_list);
+
+ dma_dev = &mdev->dmadev;
+
+ /* Set DMA capabilities */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
+ BIT(DMA_MEM_TO_MEM);
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ /* Init DMA link list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Set base routines */
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = msgdma_issue_pending;
+ dma_dev->dev = &pdev->dev;
+
+ dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
+ dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
+ dma_dev->device_config = msgdma_dma_config;
+
+ dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = msgdma_free_chan_resources;
+
+ mdev->dmachan.device = dma_dev;
+ list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);
+
+ /* Set DMA mask to 64 bits */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n");
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+
+ msgdma_reset(mdev);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto fail;
+
+ dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");
+
+ return 0;
+
+fail:
+ msgdma_dev_remove(mdev);
+
+ return ret;
+}
+
+/**
+ * msgdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int msgdma_remove(struct platform_device *pdev)
+{
+ struct msgdma_device *mdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&mdev->dmadev);
+ msgdma_dev_remove(mdev);
+
+ dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
+
+ return 0;
+}
+
+static struct platform_driver msgdma_driver = {
+ .driver = {
+ .name = "altera-msgdma",
+ },
+ .probe = msgdma_probe,
+ .remove = msgdma_remove,
+};
+
+module_platform_driver(msgdma_driver);
+
+MODULE_ALIAS("platform:altera-msgdma");
+MODULE_DESCRIPTION("Altera mSGDMA driver");
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 13cc95c0474c..b52b0d55247e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -3033,7 +3033,7 @@ static struct vendor_data vendor_ftdmac020 = {
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
-static struct amba_id pl08x_ids[] = {
+static const struct amba_id pl08x_ids[] = {
/* Samsung PL080S variant */
{
.id = 0x0a141080,
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1baf3404a365..fbab271b3bf9 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1203,138 +1203,6 @@ err:
}
/**
- * atc_prep_dma_sg - prepare memory to memory scather-gather operation
- * @chan: the channel to prepare operation on
- * @dst_sg: destination scatterlist
- * @dst_nents: number of destination scatterlist entries
- * @src_sg: source scatterlist
- * @src_nents: number of source scatterlist entries
- * @flags: tx descriptor status flags
- */
-static struct dma_async_tx_descriptor *
-atc_prep_dma_sg(struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
-{
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_desc *desc = NULL;
- struct at_desc *first = NULL;
- struct at_desc *prev = NULL;
- unsigned int src_width;
- unsigned int dst_width;
- size_t xfer_count;
- u32 ctrla;
- u32 ctrlb;
- size_t dst_len = 0, src_len = 0;
- dma_addr_t dst = 0, src = 0;
- size_t len = 0, total_len = 0;
-
- if (unlikely(dst_nents == 0 || src_nents == 0))
- return NULL;
-
- if (unlikely(dst_sg == NULL || src_sg == NULL))
- return NULL;
-
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
- | ATC_SRC_ADDR_MODE_INCR
- | ATC_DST_ADDR_MODE_INCR
- | ATC_FC_MEM2MEM;
-
- /*
- * loop until there is either no more source or no more destination
- * scatterlist entry
- */
- while (true) {
-
- /* prepare the next transfer */
- if (dst_len == 0) {
-
- /* no more destination scatterlist entries */
- if (!dst_sg || !dst_nents)
- break;
-
- dst = sg_dma_address(dst_sg);
- dst_len = sg_dma_len(dst_sg);
-
- dst_sg = sg_next(dst_sg);
- dst_nents--;
- }
-
- if (src_len == 0) {
-
- /* no more source scatterlist entries */
- if (!src_sg || !src_nents)
- break;
-
- src = sg_dma_address(src_sg);
- src_len = sg_dma_len(src_sg);
-
- src_sg = sg_next(src_sg);
- src_nents--;
- }
-
- len = min_t(size_t, src_len, dst_len);
- if (len == 0)
- continue;
-
- /* take care for the alignment */
- src_width = dst_width = atc_get_xfer_width(src, dst, len);
-
- ctrla = ATC_SRC_WIDTH(src_width) |
- ATC_DST_WIDTH(dst_width);
-
- /*
- * The number of transfers to set up refer to the source width
- * that depends on the alignment.
- */
- xfer_count = len >> src_width;
- if (xfer_count > ATC_BTSIZE_MAX) {
- xfer_count = ATC_BTSIZE_MAX;
- len = ATC_BTSIZE_MAX << src_width;
- }
-
- /* create the transfer */
- desc = atc_desc_get(atchan);
- if (!desc)
- goto err_desc_get;
-
- desc->lli.saddr = src;
- desc->lli.daddr = dst;
- desc->lli.ctrla = ctrla | xfer_count;
- desc->lli.ctrlb = ctrlb;
-
- desc->txd.cookie = 0;
- desc->len = len;
-
- atc_desc_chain(&first, &prev, desc);
-
- /* update the lengths and addresses for the next loop cycle */
- dst_len -= len;
- src_len -= len;
- dst += len;
- src += len;
-
- total_len += len;
- }
-
- /* First descriptor of the chain embedds additional information */
- first->txd.cookie = -EBUSY;
- first->total_len = total_len;
-
- /* set end-of-link to the last link descriptor of list*/
- set_desc_eol(desc);
-
- first->txd.flags = flags; /* client is in control of this ack */
-
- return &first->txd;
-
-err_desc_get:
- atc_desc_put(atchan, first);
- return NULL;
-}
-
-/**
* atc_dma_cyclic_check_values
* Check for too big/unaligned periods and unaligned DMA buffer
*/
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* setup platform data for each SoC */
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
- dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
- dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
/* get DMA parameters from controller type */
plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}
- if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
- atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
-
dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
+ dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7d4e0bcda9af..c00e3923d7d8 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -875,7 +875,7 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
dev_dbg(chan2dev(chan),
- "%s: chunk too big (%d, max size %lu)...\n",
+ "%s: chunk too big (%zu, max size %lu)...\n",
__func__, chunk->size,
AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
return NULL;
@@ -956,7 +956,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
if ((xt->numf > 1) && (xt->frame_size > 1))
return NULL;
- dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+ dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
xt->frame_size, flags);
@@ -990,7 +990,7 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
dst_skip = chunk->size + dst_icg;
dev_dbg(chan2dev(chan),
- "%s: chunk size=%d, src icg=%d, dst icg=%d\n",
+ "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
__func__, chunk->size, src_icg, dst_icg);
desc = at_xdmac_interleaved_queue_desc(chan, atchan,
@@ -1207,7 +1207,7 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *desc;
- dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+ dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
__func__, &dest, len, value, flags);
if (unlikely(!len))
@@ -1883,8 +1883,11 @@ static int atmel_xdmac_resume(struct device *dev)
struct at_xdmac_chan *atchan;
struct dma_chan *chan, *_chan;
int i;
+ int ret;
- clk_prepare_enable(atxdmac->clk);
+ ret = clk_prepare_enable(atxdmac->clk);
+ if (ret)
+ return ret;
/* Clear pending interrupts. */
for (i = 0; i < atxdmac->dma.chancnt; i++) {
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index e41bbc7cb094..6c2c44724637 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -36,6 +36,7 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
@@ -48,7 +49,8 @@
#include "dmaengine.h"
-/* SBA command related defines */
+/* ====== Driver macros and defines ===== */
+
#define SBA_TYPE_SHIFT 48
#define SBA_TYPE_MASK GENMASK(1, 0)
#define SBA_TYPE_A 0x0
@@ -82,39 +84,40 @@
#define SBA_CMD_WRITE_BUFFER 0xc
#define SBA_CMD_GALOIS 0xe
+#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
+
/* Driver helper macros */
#define to_sba_request(tx) \
container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan) \
container_of(dchan, struct sba_device, dma_chan)
-enum sba_request_state {
- SBA_REQUEST_STATE_FREE = 1,
- SBA_REQUEST_STATE_ALLOCED = 2,
- SBA_REQUEST_STATE_PENDING = 3,
- SBA_REQUEST_STATE_ACTIVE = 4,
- SBA_REQUEST_STATE_RECEIVED = 5,
- SBA_REQUEST_STATE_COMPLETED = 6,
- SBA_REQUEST_STATE_ABORTED = 7,
+/* ===== Driver data structures ===== */
+
+enum sba_request_flags {
+ SBA_REQUEST_STATE_FREE = 0x001,
+ SBA_REQUEST_STATE_ALLOCED = 0x002,
+ SBA_REQUEST_STATE_PENDING = 0x004,
+ SBA_REQUEST_STATE_ACTIVE = 0x008,
+ SBA_REQUEST_STATE_ABORTED = 0x010,
+ SBA_REQUEST_STATE_MASK = 0x0ff,
+ SBA_REQUEST_FENCE = 0x100,
};
struct sba_request {
/* Global state */
struct list_head node;
struct sba_device *sba;
- enum sba_request_state state;
- bool fence;
+ u32 flags;
/* Chained requests management */
struct sba_request *first;
struct list_head next;
- unsigned int next_count;
atomic_t next_pending_count;
/* BRCM message data */
- void *resp;
- dma_addr_t resp_dma;
- struct brcm_sba_command *cmds;
struct brcm_message msg;
struct dma_async_tx_descriptor tx;
+ /* SBA commands */
+ struct brcm_sba_command cmds[0];
};
enum sba_version {
@@ -152,19 +155,18 @@ struct sba_device {
void *cmds_base;
dma_addr_t cmds_dma_base;
spinlock_t reqs_lock;
- struct sba_request *reqs;
bool reqs_fence;
struct list_head reqs_alloc_list;
struct list_head reqs_pending_list;
struct list_head reqs_active_list;
- struct list_head reqs_received_list;
- struct list_head reqs_completed_list;
struct list_head reqs_aborted_list;
struct list_head reqs_free_list;
- int reqs_free_count;
+ /* DebugFS directory entries */
+ struct dentry *root;
+ struct dentry *stats;
};
-/* ====== SBA command helper routines ===== */
+/* ====== Command helper routines ===== */
static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
@@ -196,32 +198,50 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
-/* ====== Channel resource management routines ===== */
+/* ====== General helper routines ===== */
+
+static void sba_peek_mchans(struct sba_device *sba)
+{
+ int mchan_idx;
+
+ for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
+ mbox_client_peek_data(sba->mchans[mchan_idx]);
+}
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
+ bool found = false;
unsigned long flags;
struct sba_request *req = NULL;
spin_lock_irqsave(&sba->reqs_lock, flags);
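+ /* Reuse a free request only once its previous transaction has been acked */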
+ list_for_each_entry(req, &sba->reqs_free_list, node) {
+ if (async_tx_test_ack(&req->tx)) {
+ list_move_tail(&req->node, &sba->reqs_alloc_list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
- req = list_first_entry_or_null(&sba->reqs_free_list,
- struct sba_request, node);
- if (req) {
- list_move_tail(&req->node, &sba->reqs_alloc_list);
- req->state = SBA_REQUEST_STATE_ALLOCED;
- req->fence = false;
- req->first = req;
- INIT_LIST_HEAD(&req->next);
- req->next_count = 1;
- atomic_set(&req->next_pending_count, 1);
-
- sba->reqs_free_count--;
-
- dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ if (!found) {
+ /*
+ * We have no more free requests, so peek the
+ * mailbox channels in the hope that a few active
+ * requests have completed, which would make room
+ * for new requests.
+ */
+ sba_peek_mchans(sba);
+ return NULL;
}
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ req->flags = SBA_REQUEST_STATE_ALLOCED;
+ req->first = req;
+ INIT_LIST_HEAD(&req->next);
+ atomic_set(&req->next_pending_count, 1);
+
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ async_tx_ack(&req->tx);
return req;
}
@@ -231,7 +251,8 @@ static void _sba_pending_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
- req->state = SBA_REQUEST_STATE_PENDING;
+ req->flags &= ~SBA_REQUEST_STATE_MASK;
+ req->flags |= SBA_REQUEST_STATE_PENDING;
list_move_tail(&req->node, &sba->reqs_pending_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
@@ -246,9 +267,10 @@ static bool _sba_active_request(struct sba_device *sba,
sba->reqs_fence = false;
if (sba->reqs_fence)
return false;
- req->state = SBA_REQUEST_STATE_ACTIVE;
+ req->flags &= ~SBA_REQUEST_STATE_MASK;
+ req->flags |= SBA_REQUEST_STATE_ACTIVE;
list_move_tail(&req->node, &sba->reqs_active_list);
- if (req->fence)
+ if (req->flags & SBA_REQUEST_FENCE)
sba->reqs_fence = true;
return true;
}
@@ -258,7 +280,8 @@ static void _sba_abort_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
- req->state = SBA_REQUEST_STATE_ABORTED;
+ req->flags &= ~SBA_REQUEST_STATE_MASK;
+ req->flags |= SBA_REQUEST_STATE_ABORTED;
list_move_tail(&req->node, &sba->reqs_aborted_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
@@ -269,42 +292,11 @@ static void _sba_free_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
- req->state = SBA_REQUEST_STATE_FREE;
+ req->flags &= ~SBA_REQUEST_STATE_MASK;
+ req->flags |= SBA_REQUEST_STATE_FREE;
list_move_tail(&req->node, &sba->reqs_free_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
- sba->reqs_free_count++;
-}
-
-static void sba_received_request(struct sba_request *req)
-{
- unsigned long flags;
- struct sba_device *sba = req->sba;
-
- spin_lock_irqsave(&sba->reqs_lock, flags);
- req->state = SBA_REQUEST_STATE_RECEIVED;
- list_move_tail(&req->node, &sba->reqs_received_list);
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
-}
-
-static void sba_complete_chained_requests(struct sba_request *req)
-{
- unsigned long flags;
- struct sba_request *nreq;
- struct sba_device *sba = req->sba;
-
- spin_lock_irqsave(&sba->reqs_lock, flags);
-
- req->state = SBA_REQUEST_STATE_COMPLETED;
- list_move_tail(&req->node, &sba->reqs_completed_list);
- list_for_each_entry(nreq, &req->next, next) {
- nreq->state = SBA_REQUEST_STATE_COMPLETED;
- list_move_tail(&nreq->node, &sba->reqs_completed_list);
- }
- if (list_empty(&sba->reqs_active_list))
- sba->reqs_fence = false;
-
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_free_chained_requests(struct sba_request *req)
@@ -332,8 +324,7 @@ static void sba_chain_request(struct sba_request *first,
list_add_tail(&req->next, &first->next);
req->first = first;
- first->next_count++;
- atomic_set(&first->next_pending_count, first->next_count);
+ atomic_inc(&first->next_pending_count);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
@@ -349,14 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba)
list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
_sba_free_request(sba, req);
- /* Freeup all received request */
- list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
- _sba_free_request(sba, req);
-
- /* Freeup all completed request */
- list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
- _sba_free_request(sba, req);
-
/* Set all active requests as aborted */
list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
_sba_abort_request(sba, req);
@@ -383,26 +366,6 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
-/* ====== DMAENGINE callbacks ===== */
-
-static void sba_free_chan_resources(struct dma_chan *dchan)
-{
- /*
- * Channel resources are pre-alloced so we just free-up
- * whatever we can so that we can re-use pre-alloced
- * channel resources next time.
- */
- sba_cleanup_nonpending_requests(to_sba_device(dchan));
-}
-
-static int sba_device_terminate_all(struct dma_chan *dchan)
-{
- /* Cleanup all pending requests */
- sba_cleanup_pending_requests(to_sba_device(dchan));
-
- return 0;
-}
-
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
@@ -419,42 +382,156 @@ static int sba_send_mbox_request(struct sba_device *sba,
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
}
+
+ /* Check error returned by mailbox controller */
ret = req->msg.error;
if (ret < 0) {
dev_err(sba->dev, "message error %d", ret);
- return ret;
}
- return 0;
+ /* Signal txdone for mailbox channel */
+ mbox_client_txdone(sba->mchans[mchans_idx], ret);
+
+ return ret;
}
-static void sba_issue_pending(struct dma_chan *dchan)
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_process_pending_requests(struct sba_device *sba)
{
int ret;
- unsigned long flags;
- struct sba_request *req, *req1;
- struct sba_device *sba = to_sba_device(dchan);
+ u32 count;
+ struct sba_request *req;
- spin_lock_irqsave(&sba->reqs_lock, flags);
+ /*
+ * Process a few pending requests.
+ *
+ * For now, we process at most
+ * (<number_of_mailbox_channels> * 8) requests at a time.
+ */
+ count = sba->mchans_count * 8;
+ while (!list_empty(&sba->reqs_pending_list) && count) {
+ /* Get the first pending request */
+ req = list_first_entry(&sba->reqs_pending_list,
+ struct sba_request, node);
- /* Process all pending request */
- list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
/* Try to make request active */
if (!_sba_active_request(sba, req))
break;
/* Send request to mailbox channel */
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
ret = sba_send_mbox_request(sba, req);
- spin_lock_irqsave(&sba->reqs_lock, flags);
-
- /* If something went wrong then keep request pending */
if (ret < 0) {
_sba_pending_request(sba, req);
break;
}
+
+ count--;
+ }
+}
+
+static void sba_process_received_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ unsigned long flags;
+ struct dma_async_tx_descriptor *tx;
+ struct sba_request *nreq, *first = req->first;
+
+ /* Process only after all chained requests are received */
+ if (!atomic_dec_return(&first->next_pending_count)) {
+ tx = &first->tx;
+
+ WARN_ON(tx->cookie < 0);
+ if (tx->cookie > 0) {
+ dma_cookie_complete(tx);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ dma_descriptor_unmap(tx);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+
+ dma_run_dependencies(tx);
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Free all requests chained to first request */
+ list_for_each_entry(nreq, &first->next, next)
+ _sba_free_request(sba, nreq);
+ INIT_LIST_HEAD(&first->next);
+
+ /* Free the first request */
+ _sba_free_request(sba, first);
+
+ /* Process pending requests */
+ _sba_process_pending_requests(sba);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
+}
+
+static void sba_write_stats_in_seqfile(struct sba_device *sba,
+ struct seq_file *file)
+{
+ unsigned long flags;
+ struct sba_request *req;
+ u32 free_count = 0, alloced_count = 0;
+ u32 pending_count = 0, active_count = 0, aborted_count = 0;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ list_for_each_entry(req, &sba->reqs_free_list, node)
+ if (async_tx_test_ack(&req->tx))
+ free_count++;
+
+ list_for_each_entry(req, &sba->reqs_alloc_list, node)
+ alloced_count++;
+
+ list_for_each_entry(req, &sba->reqs_pending_list, node)
+ pending_count++;
+
+ list_for_each_entry(req, &sba->reqs_active_list, node)
+ active_count++;
+ list_for_each_entry(req, &sba->reqs_aborted_list, node)
+ aborted_count++;
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+ seq_printf(file, "maximum requests = %d\n", sba->max_req);
+ seq_printf(file, "free requests = %d\n", free_count);
+ seq_printf(file, "alloced requests = %d\n", alloced_count);
+ seq_printf(file, "pending requests = %d\n", pending_count);
+ seq_printf(file, "active requests = %d\n", active_count);
+ seq_printf(file, "aborted requests = %d\n", aborted_count);
+}
+
+/* ====== DMAENGINE callbacks ===== */
+
+static void sba_free_chan_resources(struct dma_chan *dchan)
+{
+ /*
+ * Channel resources are pre-alloced so we just free-up
+ * whatever we can so that we can re-use pre-alloced
+ * channel resources next time.
+ */
+ sba_cleanup_nonpending_requests(to_sba_device(dchan));
+}
+
+static int sba_device_terminate_all(struct dma_chan *dchan)
+{
+ /* Cleanup all pending requests */
+ sba_cleanup_pending_requests(to_sba_device(dchan));
+
+ return 0;
+}
+
+static void sba_issue_pending(struct dma_chan *dchan)
+{
+ unsigned long flags;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ /* Process pending requests */
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ _sba_process_pending_requests(sba);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
@@ -486,17 +563,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- int mchan_idx;
enum dma_status ret;
struct sba_device *sba = to_sba_device(dchan);
- for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
- mbox_client_peek_data(sba->mchans[mchan_idx]);
-
ret = dma_cookie_status(dchan, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
+ sba_peek_mchans(sba);
+
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -506,6 +581,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
{
u64 cmd;
u32 c_mdata;
+ dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load dummy data into buf0 */
@@ -521,7 +597,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
cmdsp->cmd = cmd;
*cmdsp->cmd_dma = cpu_to_le64(cmd);
cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
- cmdsp->data = req->resp_dma;
+ cmdsp->data = resp_dma;
cmdsp->data_len = req->sba->hw_resp_size;
cmdsp++;
@@ -542,11 +618,11 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
- cmdsp->data = req->resp_dma;
+ cmdsp->data = resp_dma;
cmdsp->data_len = req->sba->hw_resp_size;
cmdsp++;
@@ -573,7 +649,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
* Force fence so that no requests are submitted
* until DMA callback for this request is invoked.
*/
- req->fence = true;
+ req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
@@ -593,6 +669,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req,
{
u64 cmd;
u32 c_mdata;
+ dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load data into buf0 */
@@ -629,7 +706,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -656,7 +733,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba,
req = sba_alloc_request(sba);
if (!req)
return NULL;
- req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+ if (flags & DMA_PREP_FENCE)
+ req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
@@ -711,6 +789,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
unsigned int i;
+ dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load data into buf0 */
@@ -766,7 +845,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -782,7 +861,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
u32 src_cnt, size_t len, unsigned long flags)
@@ -793,7 +872,8 @@ sba_prep_dma_xor_req(struct sba_device *sba,
req = sba_alloc_request(sba);
if (!req)
return NULL;
- req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+ if (flags & DMA_PREP_FENCE)
+ req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_xor_msg(req, req->cmds, &req->msg,
@@ -854,6 +934,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
unsigned int i;
+ dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
if (pq_continue) {
@@ -947,7 +1028,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -974,7 +1055,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -991,7 +1072,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
@@ -1002,7 +1083,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
req = sba_alloc_request(sba);
if (!req)
return NULL;
- req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+ if (flags & DMA_PREP_FENCE)
+ req->flags |= SBA_REQUEST_FENCE;
/* Fillup request messages */
sba_fillup_pq_msg(req, dmaf_continue(flags),
@@ -1027,6 +1109,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
u8 pos, dpos = raid6_gflog[scf];
+ dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
if (!dst_p)
@@ -1105,7 +1188,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1226,7 +1309,7 @@ skip_q_computation:
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
- cmdsp->resp = req->resp_dma;
+ cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1243,7 +1326,7 @@ skip_q:
msg->error = 0;
}
-struct sba_request *
+static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q,
dma_addr_t src, u8 scf, size_t len,
@@ -1255,7 +1338,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
req = sba_alloc_request(sba);
if (!req)
return NULL;
- req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+ if (flags & DMA_PREP_FENCE)
+ req->flags |= SBA_REQUEST_FENCE;
/* Fillup request messages */
sba_fillup_pq_single_msg(req, dmaf_continue(flags),
@@ -1370,40 +1454,10 @@ fail:
/* ====== Mailbox callbacks ===== */
-static void sba_dma_tx_actions(struct sba_request *req)
-{
- struct dma_async_tx_descriptor *tx = &req->tx;
-
- WARN_ON(tx->cookie < 0);
-
- if (tx->cookie > 0) {
- dma_cookie_complete(tx);
-
- /*
- * Call the callback (must not sleep or submit new
- * operations to this channel)
- */
- if (tx->callback)
- tx->callback(tx->callback_param);
-
- dma_descriptor_unmap(tx);
- }
-
- /* Run dependent operations */
- dma_run_dependencies(tx);
-
- /* If waiting for 'ack' then move to completed list */
- if (!async_tx_test_ack(&req->tx))
- sba_complete_chained_requests(req);
- else
- sba_free_chained_requests(req);
-}
-
static void sba_receive_message(struct mbox_client *cl, void *msg)
{
- unsigned long flags;
struct brcm_message *m = msg;
- struct sba_request *req = m->ctx, *req1;
+ struct sba_request *req = m->ctx;
struct sba_device *sba = req->sba;
/* Error count if message has error */
@@ -1411,52 +1465,37 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
dev_err(sba->dev, "%s got message with error %d",
dma_chan_name(&sba->dma_chan), m->error);
- /* Mark request as received */
- sba_received_request(req);
-
- /* Wait for all chained requests to be completed */
- if (atomic_dec_return(&req->first->next_pending_count))
- goto done;
-
- /* Point to first request */
- req = req->first;
-
- /* Update request */
- if (req->state == SBA_REQUEST_STATE_RECEIVED)
- sba_dma_tx_actions(req);
- else
- sba_free_chained_requests(req);
+ /* Process received request */
+ sba_process_received_request(sba, req);
+}
- spin_lock_irqsave(&sba->reqs_lock, flags);
+/* ====== Debugfs callbacks ====== */
- /* Re-check all completed request waiting for 'ack' */
- list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
- sba_dma_tx_actions(req);
- spin_lock_irqsave(&sba->reqs_lock, flags);
- }
+static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+ struct platform_device *pdev = to_platform_device(file->private);
+ struct sba_device *sba = platform_get_drvdata(pdev);
- spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ /* Write stats in file */
+ sba_write_stats_in_seqfile(sba, file);
-done:
- /* Try to submit pending request */
- sba_issue_pending(&sba->dma_chan);
+ return 0;
}
/* ====== Platform driver routines ===== */
static int sba_prealloc_channel_resources(struct sba_device *sba)
{
- int i, j, p, ret = 0;
+ int i, j, ret = 0;
struct sba_request *req = NULL;
- sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
sba->max_resp_pool_size,
&sba->resp_dma_base, GFP_KERNEL);
if (!sba->resp_base)
return -ENOMEM;
- sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
sba->max_cmds_pool_size,
&sba->cmds_dma_base, GFP_KERNEL);
if (!sba->cmds_base) {
@@ -1469,36 +1508,23 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
INIT_LIST_HEAD(&sba->reqs_alloc_list);
INIT_LIST_HEAD(&sba->reqs_pending_list);
INIT_LIST_HEAD(&sba->reqs_active_list);
- INIT_LIST_HEAD(&sba->reqs_received_list);
- INIT_LIST_HEAD(&sba->reqs_completed_list);
INIT_LIST_HEAD(&sba->reqs_aborted_list);
INIT_LIST_HEAD(&sba->reqs_free_list);
- sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
- sizeof(*req), GFP_KERNEL);
- if (!sba->reqs) {
- ret = -ENOMEM;
- goto fail_free_cmds_pool;
- }
-
- for (i = 0, p = 0; i < sba->max_req; i++) {
- req = &sba->reqs[i];
+ for (i = 0; i < sba->max_req; i++) {
+ req = devm_kzalloc(sba->dev,
+ sizeof(*req) +
+ sba->max_cmd_per_req * sizeof(req->cmds[0]),
+ GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail_free_cmds_pool;
+ }
INIT_LIST_HEAD(&req->node);
req->sba = sba;
- req->state = SBA_REQUEST_STATE_FREE;
+ req->flags = SBA_REQUEST_STATE_FREE;
INIT_LIST_HEAD(&req->next);
- req->next_count = 1;
atomic_set(&req->next_pending_count, 0);
- req->fence = false;
- req->resp = sba->resp_base + p;
- req->resp_dma = sba->resp_dma_base + p;
- p += sba->hw_resp_size;
- req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
- sizeof(*req->cmds), GFP_KERNEL);
- if (!req->cmds) {
- ret = -ENOMEM;
- goto fail_free_cmds_pool;
- }
for (j = 0; j < sba->max_cmd_per_req; j++) {
req->cmds[j].cmd = 0;
req->cmds[j].cmd_dma = sba->cmds_base +
@@ -1509,21 +1535,20 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
}
memset(&req->msg, 0, sizeof(req->msg));
dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ async_tx_ack(&req->tx);
req->tx.tx_submit = sba_tx_submit;
- req->tx.phys = req->resp_dma;
+ req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
list_add_tail(&req->node, &sba->reqs_free_list);
}
- sba->reqs_free_count = sba->max_req;
-
return 0;
fail_free_cmds_pool:
- dma_free_coherent(sba->dma_dev.dev,
+ dma_free_coherent(sba->mbox_dev,
sba->max_cmds_pool_size,
sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
- dma_free_coherent(sba->dma_dev.dev,
+ dma_free_coherent(sba->mbox_dev,
sba->max_resp_pool_size,
sba->resp_base, sba->resp_dma_base);
return ret;
@@ -1532,9 +1557,9 @@ fail_free_resp_pool:
static void sba_freeup_channel_resources(struct sba_device *sba)
{
dmaengine_terminate_all(&sba->dma_chan);
- dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
+ dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
sba->cmds_base, sba->cmds_dma_base);
- dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
+ dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
sba->resp_base, sba->resp_dma_base);
sba->resp_base = NULL;
sba->resp_dma_base = 0;
@@ -1625,6 +1650,13 @@ static int sba_probe(struct platform_device *pdev)
sba->dev = &pdev->dev;
platform_set_drvdata(pdev, sba);
+ /* Number of channels equals number of mailbox channels */
+ ret = of_count_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells");
+ if (ret <= 0)
+ return -ENODEV;
+ mchans_count = ret;
+
/* Determine SBA version from DT compatible string */
if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
sba->ver = SBA_VER_1;
@@ -1637,14 +1669,12 @@ static int sba_probe(struct platform_device *pdev)
/* Derived Configuration parameters */
switch (sba->ver) {
case SBA_VER_1:
- sba->max_req = 1024;
sba->hw_buf_size = 4096;
sba->hw_resp_size = 8;
sba->max_pq_coefs = 6;
sba->max_pq_srcs = 6;
break;
case SBA_VER_2:
- sba->max_req = 1024;
sba->hw_buf_size = 4096;
sba->hw_resp_size = 8;
sba->max_pq_coefs = 30;
@@ -1658,6 +1688,7 @@ static int sba_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
+ sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
sba->max_cmd_per_req = sba->max_pq_srcs + 3;
sba->max_xor_srcs = sba->max_cmd_per_req - 1;
sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1668,25 +1699,17 @@ static int sba_probe(struct platform_device *pdev)
sba->client.dev = &pdev->dev;
sba->client.rx_callback = sba_receive_message;
sba->client.tx_block = false;
- sba->client.knows_txdone = false;
+ sba->client.knows_txdone = true;
sba->client.tx_tout = 0;
- /* Number of channels equals number of mailbox channels */
- ret = of_count_phandle_with_args(pdev->dev.of_node,
- "mboxes", "#mbox-cells");
- if (ret <= 0)
- return -ENODEV;
- mchans_count = ret;
- sba->mchans_count = 0;
- atomic_set(&sba->mchans_current, 0);
-
/* Allocate mailbox channel array */
- sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count,
+ sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
sizeof(*sba->mchans), GFP_KERNEL);
if (!sba->mchans)
return -ENOMEM;
/* Request mailbox channels */
+ sba->mchans_count = 0;
for (i = 0; i < mchans_count; i++) {
sba->mchans[i] = mbox_request_channel(&sba->client, i);
if (IS_ERR(sba->mchans[i])) {
@@ -1695,6 +1718,7 @@ static int sba_probe(struct platform_device *pdev)
}
sba->mchans_count++;
}
+ atomic_set(&sba->mchans_current, 0);
/* Find-out underlying mailbox device */
ret = of_parse_phandle_with_args(pdev->dev.of_node,
@@ -1723,15 +1747,34 @@ static int sba_probe(struct platform_device *pdev)
}
}
- /* Register DMA device with linux async framework */
- ret = sba_async_register(sba);
+ /* Prealloc channel resource */
+ ret = sba_prealloc_channel_resources(sba);
if (ret)
goto fail_free_mchans;
- /* Prealloc channel resource */
- ret = sba_prealloc_channel_resources(sba);
+ /* Check availability of debugfs */
+ if (!debugfs_initialized())
+ goto skip_debugfs;
+
+ /* Create debugfs root entry */
+ sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
+ if (IS_ERR_OR_NULL(sba->root)) {
+ dev_err(sba->dev, "failed to create debugfs root entry\n");
+ sba->root = NULL;
+ goto skip_debugfs;
+ }
+
+ /* Create debugfs stats entry */
+ sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
+ sba_debugfs_stats_show);
+ if (IS_ERR_OR_NULL(sba->stats))
+ dev_err(sba->dev, "failed to create debugfs stats file\n");
+skip_debugfs:
+
+ /* Register DMA device with Linux async framework */
+ ret = sba_async_register(sba);
if (ret)
- goto fail_async_dev_unreg;
+ goto fail_free_resources;
/* Print device info */
dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
@@ -1740,8 +1783,9 @@ static int sba_probe(struct platform_device *pdev)
return 0;
-fail_async_dev_unreg:
- dma_async_device_unregister(&sba->dma_dev);
+fail_free_resources:
+ debugfs_remove_recursive(sba->root);
+ sba_freeup_channel_resources(sba);
fail_free_mchans:
for (i = 0; i < sba->mchans_count; i++)
mbox_free_channel(sba->mchans[i]);
@@ -1753,10 +1797,12 @@ static int sba_remove(struct platform_device *pdev)
int i;
struct sba_device *sba = platform_get_drvdata(pdev);
- sba_freeup_channel_resources(sba);
-
dma_async_device_unregister(&sba->dma_dev);
+ debugfs_remove_recursive(sba->root);
+
+ sba_freeup_channel_resources(sba);
+
for (i = 0; i < sba->mchans_count; i++)
mbox_free_channel(sba->mchans[i]);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d9118ec23025..b451354735d3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device)
return -ENODEV;
/* validate device routines */
- BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
- !device->device_prep_dma_memcpy);
- BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
- !device->device_prep_dma_xor);
- BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
- !device->device_prep_dma_xor_val);
- BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
- !device->device_prep_dma_pq);
- BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
- !device->device_prep_dma_pq_val);
- BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
- !device->device_prep_dma_memset);
- BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
- !device->device_prep_dma_interrupt);
- BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
- !device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
- !device->device_prep_dma_cyclic);
- BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
- !device->device_prep_interleaved_dma);
-
- BUG_ON(!device->device_tx_status);
- BUG_ON(!device->device_issue_pending);
- BUG_ON(!device->dev);
+ if (!device->dev) {
+ pr_err("DMA device must have dev\n");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMCPY");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_XOR");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_XOR_VAL");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_PQ");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_PQ_VAL");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMSET");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_INTERRUPT");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_CYCLIC");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_INTERLEAVE");
+ return -EIO;
+ }
+
+ if (!device->device_tx_status) {
+ dev_err(device->dev, "Device tx_status is not defined\n");
+ return -EIO;
+ }
+
+ if (!device->device_issue_pending) {
+ dev_err(device->dev, "Device issue_pending is not defined\n");
+ return -EIO;
+ }
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a07ef3d6b3ec..34ff53290b03 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
-static unsigned int sg_buffers = 1;
-module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(sg_buffers,
- "Number of scatter gather buffers (default: 1)");
-
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-slave_sg (default: 0)");
+ "dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -158,6 +153,7 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
+#define PATTERN_MEMSET_IDX 0x01
struct dmatest_thread {
struct list_head node;
@@ -239,46 +235,62 @@ static unsigned long dmatest_random(void)
return buf;
}
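+/* For memset tests every byte uses the same pattern index, so the buffer holds one value */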
+static inline u8 gen_inv_idx(u8 index, bool is_memset)
+{
+ u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
+
+ return ~val & PATTERN_COUNT_MASK;
+}
+
+static inline u8 gen_src_value(u8 index, bool is_memset)
+{
+ return PATTERN_SRC | gen_inv_idx(index, is_memset);
+}
+
+static inline u8 gen_dst_value(u8 index, bool is_memset)
+{
+ return PATTERN_DST | gen_inv_idx(index, is_memset);
+}
+
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
- unsigned int buf_size)
+ unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset);
for ( ; i < start + len; i++)
- buf[i] = PATTERN_SRC | PATTERN_COPY
- | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
for ( ; i < buf_size; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_src_value(i, is_memset);
buf++;
}
}
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
- unsigned int buf_size)
+ unsigned int buf_size, bool is_memset)
{
unsigned int i;
u8 *buf;
for (; (buf = *bufs); bufs++) {
for (i = 0; i < start; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset);
for ( ; i < start + len; i++)
- buf[i] = PATTERN_DST | PATTERN_OVERWRITE
- | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset) |
+ PATTERN_OVERWRITE;
for ( ; i < buf_size; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ buf[i] = gen_dst_value(i, is_memset);
}
}
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
- unsigned int counter, bool is_srcbuf)
+ unsigned int counter, bool is_srcbuf, bool is_memset)
{
u8 diff = actual ^ pattern;
- u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ u8 expected = pattern | gen_inv_idx(counter, is_memset);
const char *thread_name = current->comm;
if (is_srcbuf)
@@ -298,7 +310,7 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+ bool is_srcbuf, bool is_memset)
{
unsigned int i;
unsigned int error_count = 0;
@@ -311,11 +323,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
counter = counter_orig;
for (i = start; i < end; i++) {
actual = buf[i];
- expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ expected = pattern | gen_inv_idx(counter, is_memset);
if (actual != expected) {
if (error_count < MAX_ERROR_COUNT)
dmatest_mismatch(actual, pattern, i,
- counter, is_srcbuf);
+ counter, is_srcbuf,
+ is_memset);
error_count++;
}
counter++;
@@ -435,6 +448,7 @@ static int dmatest_func(void *data)
s64 runtime = 0;
unsigned long long total_len = 0;
u8 align = 0;
+ bool is_memset = false;
set_freezable();
@@ -448,9 +462,10 @@ static int dmatest_func(void *data)
if (thread->type == DMA_MEMCPY) {
align = dev->copy_align;
src_cnt = dst_cnt = 1;
- } else if (thread->type == DMA_SG) {
- align = dev->copy_align;
- src_cnt = dst_cnt = sg_buffers;
+ } else if (thread->type == DMA_MEMSET) {
+ align = dev->fill_align;
+ src_cnt = dst_cnt = 1;
+ is_memset = true;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -530,8 +545,6 @@ static int dmatest_func(void *data)
dma_addr_t srcs[src_cnt];
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
- struct scatterlist tx_sg[src_cnt];
- struct scatterlist rx_sg[src_cnt];
total_tests++;
@@ -571,9 +584,9 @@ static int dmatest_func(void *data)
dst_off = (dst_off >> align) << align;
dmatest_init_srcs(thread->srcs, src_off, len,
- params->buf_size);
+ params->buf_size, is_memset);
dmatest_init_dsts(thread->dsts, dst_off, len,
- params->buf_size);
+ params->buf_size, is_memset);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
@@ -627,22 +640,15 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
- sg_init_table(tx_sg, src_cnt);
- sg_init_table(rx_sg, src_cnt);
- for (i = 0; i < src_cnt; i++) {
- sg_dma_address(&rx_sg[i]) = srcs[i];
- sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
- sg_dma_len(&tx_sg[i]) = len;
- sg_dma_len(&rx_sg[i]) = len;
- }
-
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
srcs[0], len, flags);
- else if (thread->type == DMA_SG)
- tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
- rx_sg, src_cnt, flags);
+ else if (thread->type == DMA_MEMSET)
+ tx = dev->device_prep_dma_memset(chan,
+ dsts[0] + dst_off,
+ *(thread->srcs[0] + src_off),
+ len, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst_off,
@@ -722,23 +728,25 @@ static int dmatest_func(void *data)
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
- 0, PATTERN_SRC, true);
+ 0, PATTERN_SRC, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off,
src_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, true);
+ PATTERN_SRC | PATTERN_COPY, true, is_memset);
error_count += dmatest_verify(thread->srcs, src_off + len,
params->buf_size, src_off + len,
- PATTERN_SRC, true);
+ PATTERN_SRC, true, is_memset);
pr_debug("%s: verifying dest buffer...\n", current->comm);
error_count += dmatest_verify(thread->dsts, 0, dst_off,
- 0, PATTERN_DST, false);
+ 0, PATTERN_DST, false, is_memset);
+
error_count += dmatest_verify(thread->dsts, dst_off,
dst_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, false);
+ PATTERN_SRC | PATTERN_COPY, false, is_memset);
+
error_count += dmatest_verify(thread->dsts, dst_off + len,
params->buf_size, dst_off + len,
- PATTERN_DST, false);
+ PATTERN_DST, false, is_memset);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
@@ -821,8 +829,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
if (type == DMA_MEMCPY)
op = "copy";
- else if (type == DMA_SG)
- op = "sg";
+ else if (type == DMA_MEMSET)
+ op = "set";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
@@ -883,9 +891,9 @@ static int dmatest_add_channel(struct dmatest_info *info,
}
}
- if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
if (dmatest == 1) {
- cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
thread_count += cnt > 0 ? cnt : 0;
}
}
@@ -961,8 +969,8 @@ static void run_threaded_test(struct dmatest_info *info)
params->noverify = noverify;
request_channels(info, DMA_MEMCPY);
+ request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
- request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
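
With DMA_SG gone, the dmatest=1 module parameter now selects the memset test instead of the old slave_sg mode: channels advertising DMA_MEMSET get "set" threads, dmatest=0 keeps plain memcpy, and the now-unused sg_buffers parameter is dropped. A run is still started the usual way, by loading the module with the chosen parameters and writing 1 to the run parameter under /sys/module/dmatest/parameters/.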
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 3b8b752ede2d..3eaece888e75 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -825,122 +825,6 @@ fail:
return NULL;
}
-static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
-{
- struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
- struct fsldma_chan *chan = to_fsl_chan(dchan);
- size_t dst_avail, src_avail;
- dma_addr_t dst, src;
- size_t len;
-
- /* basic sanity checks */
- if (dst_nents == 0 || src_nents == 0)
- return NULL;
-
- if (dst_sg == NULL || src_sg == NULL)
- return NULL;
-
- /*
- * TODO: should we check that both scatterlists have the same
- * TODO: number of bytes in total? Is that really an error?
- */
-
- /* get prepared for the loop */
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
-
- /* run until we are out of scatterlist entries */
- while (true) {
-
- /* create the largest transaction possible */
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
- if (len == 0)
- goto fetch;
-
- dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
- src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
- /* allocate and populate the descriptor */
- new = fsl_dma_alloc_descriptor(chan);
- if (!new) {
- chan_err(chan, "%s\n", msg_ld_oom);
- goto fail;
- }
-
- set_desc_cnt(chan, &new->hw, len);
- set_desc_src(chan, &new->hw, src);
- set_desc_dst(chan, &new->hw, dst);
-
- if (!first)
- first = new;
- else
- set_desc_next(chan, &prev->hw, new->async_tx.phys);
-
- new->async_tx.cookie = 0;
- async_tx_ack(&new->async_tx);
- prev = new;
-
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->tx_list);
-
- /* update metadata */
- dst_avail -= len;
- src_avail -= len;
-
-fetch:
- /* fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
-
- /* no more entries: we're done */
- if (dst_nents == 0)
- break;
-
- /* fetch the next entry: if there are no more: done */
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
-
- dst_nents--;
- dst_avail = sg_dma_len(dst_sg);
- }
-
- /* fetch the next src scatterlist entry */
- if (src_avail == 0) {
-
- /* no more entries: we're done */
- if (src_nents == 0)
- break;
-
- /* fetch the next entry: if there are no more: done */
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
-
- src_nents--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- new->async_tx.flags = flags; /* client is in control of this ack */
- new->async_tx.cookie = -EBUSY;
-
- /* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(chan, new);
-
- return &first->async_tx;
-
-fail:
- if (!first)
- return NULL;
-
- fsldma_free_desc_list_reverse(chan, &first->tx_list);
- return NULL;
-}
-
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
struct fsldma_chan *chan;
@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
- dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
- fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_config = fsl_dma_device_config;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a371b07a0981..f70cc74032ea 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -644,9 +644,13 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
- /* 5 microsecond delay per pending descriptor */
- writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
- ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
+ /* delay per pending descriptor, in microseconds, configured via sysfs */
+ if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
+ writew(min((ioat_chan->intr_coalesce * (active - i)),
+ IOAT_INTRDELAY_MASK),
+ ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
+ ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
+ }
}
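
Programming the interrupt-delay register on every cleanup pass would add an MMIO write to each completion interrupt, so the write is now issued only when the sysfs-configured coalescing value has changed; note this also means the (active - i) scaling is only re-applied at the moment the value changes. A minimal sketch of the write-on-change pattern (names hypothetical):

struct coalesce_cache {
	int cur;	/* value requested via sysfs */
	int prev;	/* value last programmed into hardware */
};

static void maybe_program(struct coalesce_cache *c, void (*mmio_write16)(int val))
{
	if (c->cur != c->prev) {
		mmio_write16(c->cur);	/* single 16-bit register write */
		c->prev = c->cur;
	}
}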
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a9bc1a15b0d1..56200eefcf5e 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -142,11 +142,14 @@ struct ioatdma_chan {
spinlock_t prep_lock;
struct ioat_descs descs[2];
int desc_chunks;
+ int intr_coalesce;
+ int prev_intr_coalesce;
};
struct ioat_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct dma_chan *, char *);
+ ssize_t (*store)(struct dma_chan *, const char *, size_t);
};
/**
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index ed8ed1192775..93e006c3441d 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -39,7 +39,7 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-static struct pci_device_id ioat_pci_tbl[] = {
+static const struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
index cb4a857ee21b..3ac677f29e8f 100644
--- a/drivers/dma/ioat/sysfs.c
+++ b/drivers/dma/ioat/sysfs.c
@@ -64,8 +64,24 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
return entry->show(&ioat_chan->dma_chan, page);
}
+static ssize_t
+ioat_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t count)
+{
+ struct ioat_sysfs_entry *entry;
+ struct ioatdma_chan *ioat_chan;
+
+ entry = container_of(attr, struct ioat_sysfs_entry, attr);
+ ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+ if (!entry->store)
+ return -EIO;
+ return entry->store(&ioat_chan->dma_chan, page, count);
+}
+
const struct sysfs_ops ioat_sysfs_ops = {
.show = ioat_attr_show,
+ .store = ioat_attr_store,
};
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
@@ -121,11 +137,37 @@ static ssize_t ring_active_show(struct dma_chan *c, char *page)
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
+{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+ return sprintf(page, "%d\n", ioat_chan->intr_coalesce);
+}
+
+static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
+ size_t count)
+{
+ int intr_coalesce = 0;
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+ if (sscanf(page, "%d", &intr_coalesce) != 1)
+ return -EINVAL;
+
+ if ((intr_coalesce < 0) ||
+ (intr_coalesce > IOAT_INTRDELAY_MASK))
+ return -EINVAL;
+
+ ioat_chan->intr_coalesce = intr_coalesce;
+
+ return count;
+}
+
+static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);
+
static struct attribute *ioat_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
&ioat_version_attr.attr,
+ &intr_coalesce_attr.attr,
NULL,
};
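
For reference, the usual kernel idiom for parsing a single integer from a sysfs buffer is kstrtoint(), which tolerates the trailing newline but rejects any other garbage outright; a sketch of the store hook written that way (not part of the patch):

static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
				   size_t count)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	int val, err;

	err = kstrtoint(page, 0, &val);
	if (err)
		return err;

	if (val < 0 || val > IOAT_INTRDELAY_MASK)
		return -EINVAL;

	ioat_chan->intr_coalesce = val;
	return count;
}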
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01e25c68dd5a..01d2a750a621 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -223,7 +223,6 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
vchan_cookie_complete(&p->ds_run->vd);
- WARN_ON_ONCE(p->ds_done);
p->ds_done = p->ds_run;
p->ds_run = NULL;
spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -274,13 +273,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
*/
list_del(&ds->vd.node);
- WARN_ON_ONCE(c->phy->ds_run);
- WARN_ON_ONCE(c->phy->ds_done);
c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
/* start dma */
k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
return 0;
}
+ c->phy->ds_run = NULL;
+ c->phy->ds_done = NULL;
return -EAGAIN;
}
@@ -722,11 +722,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
k3_dma_free_desc(&p->ds_run->vd);
p->ds_run = NULL;
}
- if (p->ds_done) {
- k3_dma_free_desc(&p->ds_done->vd);
- p->ds_done = NULL;
- }
-
+ p->ds_done = NULL;
}
spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25bc5b103aa2..1993889003fd 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
hw_desc->byte_count = byte_count;
}
-/* Populate the descriptor */
-static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
- dma_addr_t dma_src, dma_addr_t dma_dst,
- u32 len, struct mv_xor_desc_slot *prev)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
-
- hw_desc->status = XOR_DESC_DMA_OWNED;
- hw_desc->phy_next_desc = 0;
- /* Configure for XOR with only one src address -> MEMCPY */
- hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
- hw_desc->phy_dest_addr = dma_dst;
- hw_desc->phy_src_addr[0] = dma_src;
- hw_desc->byte_count = len;
-
- if (prev) {
- struct mv_xor_desc *hw_prev = prev->hw_desc;
-
- hw_prev->phy_next_desc = desc->async_tx.phys;
- }
-}
-
-static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
-
- /* Enable end-of-descriptor interrupt */
- hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
-}
-
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
-/**
- * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
- * @chan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- struct mv_xor_desc_slot *new;
- struct mv_xor_desc_slot *first = NULL;
- struct mv_xor_desc_slot *prev = NULL;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
- int desc_cnt = 0;
- int ret;
-
- dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
- __func__, dst_sg_len, src_sg_len, flags);
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
-
- /* Run until we are out of scatterlist entries */
- while (true) {
- /* Allocate and populate the descriptor */
- desc_cnt++;
- new = mv_chan_alloc_slot(mv_chan);
- if (!new) {
- dev_err(mv_chan_to_devp(mv_chan),
- "Out of descriptors (desc_cnt=%d)!\n",
- desc_cnt);
- goto err;
- }
-
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
- if (len == 0)
- goto fetch;
-
- if (len < MV_XOR_MIN_BYTE_COUNT) {
- dev_err(mv_chan_to_devp(mv_chan),
- "Transfer size of %zu too small!\n", len);
- goto err;
- }
-
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
-
- /* Check if a new window needs to get added for 'dst' */
- ret = mv_xor_add_io_win(mv_chan, dma_dst);
- if (ret)
- goto err;
-
- /* Check if a new window needs to get added for 'src' */
- ret = mv_xor_add_io_win(mv_chan, dma_src);
- if (ret)
- goto err;
-
- /* Populate the descriptor */
- mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
- prev = new;
- dst_avail -= len;
- src_avail -= len;
-
- if (!first)
- first = new;
- else
- list_move_tail(&new->node, &first->sg_tx_list);
-
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
-
- /* Fetch the next entry: if there are no more: done */
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
-
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
-
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
-
- /* Fetch the next entry: if there are no more: done */
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
-
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- /* Set the EOD flag in the last descriptor */
- mv_xor_desc_config_eod(new);
- first->async_tx.flags = flags;
-
- return &first->async_tx;
-
-err:
- /* Cleanup: Move all descriptors back into the free list */
- spin_lock_bh(&mv_chan->lock);
- mv_desc_clean_slot(first, mv_chan);
- spin_unlock_bh(&mv_chan->lock);
-
- return NULL;
-}
-
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
- if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
- dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_zero(cap_mask);
dma_cap_set(DMA_MEMCPY, cap_mask);
- dma_cap_set(DMA_SG, cap_mask);
dma_cap_set(DMA_XOR, cap_mask);
dma_cap_set(DMA_INTERRUPT, cap_mask);
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 3f45b9bdf201..d3f918a9ee76 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
DMA_MEM_TO_MEM, flags);
}
-static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
- struct dma_chan *dchan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
-{
- struct nbpf_channel *chan = nbpf_to_chan(dchan);
-
- if (dst_nents != src_nents)
- return NULL;
-
- return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
- DMA_MEM_TO_MEM, flags);
-}
-
static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags, void *context)
@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
- dma_cap_set(DMA_SG, dma_dev->cap_mask);
/* Common and MEMCPY operations */
dma_dev->device_alloc_chan_resources
= nbpf_alloc_chan_resources;
dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
- dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
dma_dev->device_tx_status = nbpf_tx_status;
dma_dev->device_issue_pending = nbpf_issue_pending;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index faae0bfe1109..91fd395c90c4 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -38,8 +38,8 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
if (ofdma->of_node == dma_spec->np)
return ofdma;
- pr_debug("%s: can't find DMA controller %s\n", __func__,
- dma_spec->np->full_name);
+ pr_debug("%s: can't find DMA controller %pOF\n", __func__,
+ dma_spec->np);
return NULL;
}
@@ -255,8 +255,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property of node '%s' missing or empty\n",
- __func__, np->full_name);
+ pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
+ __func__, np);
return ERR_PTR(-ENODEV);
}
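
%pOF is the printk format extension that prints a struct device_node's full path straight from the node pointer; converting these messages removes the reads of np->full_name, which was being phased out of struct device_node at the time. The same conversion recurs in the ppc4xx and cpc925 hunks below.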
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index b19ee04567b5..f122c2a7b9f0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3023,7 +3023,7 @@ static int pl330_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id pl330_ids[] = {
+static const struct amba_id pl330_ids[] = {
{
.id = 0x00041330,
.mask = 0x000fffff,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index b1535b1fe95c..4cf0d4d0cecf 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4040,9 +4040,9 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* it is DMA0 or DMA1 */
idx = of_get_property(np, "cell-index", &len);
if (!idx || (len != sizeof(u32))) {
- dev_err(&ofdev->dev, "Device node %s has missing "
+ dev_err(&ofdev->dev, "Device node %pOF has missing "
"or invalid cell-index property\n",
- np->full_name);
+ np);
return -EINVAL;
}
id = *idx;
@@ -4307,7 +4307,7 @@ static int ppc440spe_adma_remove(struct platform_device *ofdev)
* "poly" allows setting/checking used polynomial (for PPC440SPe only).
*/
-static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
+static ssize_t devices_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
int i;
@@ -4321,16 +4321,17 @@ static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
}
return size;
}
+static DRIVER_ATTR_RO(devices);
-static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
+static ssize_t enable_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE,
"PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
ppc440spe_r6_enabled ? "EN" : "DIS");
}
-static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
- const char *buf, size_t count)
+static ssize_t enable_store(struct device_driver *dev, const char *buf,
+ size_t count)
{
unsigned long val;
@@ -4357,8 +4358,9 @@ static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
}
return count;
}
+static DRIVER_ATTR_RW(enable);
-static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
+static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
@@ -4377,8 +4379,8 @@ static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
return size;
}
-static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
- const char *buf, size_t count)
+static ssize_t poly_store(struct device_driver *dev, const char *buf,
+ size_t count)
{
unsigned long reg, val;
@@ -4404,12 +4406,7 @@ static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
return count;
}
-
-static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
-static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
- store_ppc440spe_r6enable);
-static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
- store_ppc440spe_r6poly);
+static DRIVER_ATTR_RW(poly);
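
DRIVER_ATTR_RO(devices), DRIVER_ATTR_RW(enable) and DRIVER_ATTR_RW(poly) expand (via __ATTR_RO/__ATTR_RW) to driver_attribute structures wired to <name>_show and, for RW attributes, <name>_store; that naming contract is why the old show_ppc440spe_r6poly() helper becomes poly_show() rather than keeping an ad-hoc name.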
/*
* Common initialisation for RAID engines; allocate memory for
@@ -4448,8 +4445,7 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
+ pr_err("%pOF: can't get DCR registers base/len!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
@@ -4457,7 +4453,7 @@ static int ppc440spe_configure_raid_devices(void)
i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(i2o_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
+ pr_err("%pOF: failed to map DCRs!\n", np);
of_node_put(np);
iounmap(i2o_reg);
return -ENODEV;
@@ -4518,15 +4514,14 @@ static int ppc440spe_configure_raid_devices(void)
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (!dcr_base && !dcr_len) {
- pr_err("%s: can't get DCR registers base/len!\n",
- np->full_name);
+ pr_err("%pOF: can't get DCR registers base/len!\n", np);
ret = -ENODEV;
goto out_mq;
}
ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
- pr_err("%s: failed to map DCRs!\n", np->full_name);
+ pr_err("%pOF: failed to map DCRs!\n", np);
ret = -ENODEV;
goto out_mq;
}
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 03c4eb3fd314..6d89fb6a6a92 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -65,6 +65,7 @@ struct bam_desc_hw {
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
+#define DESC_FLAG_CMD BIT(11)
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;
do {
+ if (flags & DMA_PREP_CMD)
+ desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
+
desc->addr = cpu_to_le32(sg_dma_address(sg) +
curr_offset);
@@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan)
/* set any special flags on the last descriptor */
if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags =
+ desc[async_desc->xfer_len - 1].flags |=
cpu_to_le16(async_desc->flags);
else
desc[async_desc->xfer_len - 1].flags |=
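
Two things happen in this bam_dma hunk: descriptors prepared with the new DMA_PREP_CMD flag are tagged DESC_FLAG_CMD so the hardware treats them as command descriptors, and bam_start_dma() now ORs the end-of-transfer flags into the last descriptor instead of assigning them, since a plain assignment would wipe the DESC_FLAG_CMD bit set at prep time.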
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 34fb6afd229b..e3669850aef4 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
return NULL;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
- src, dest, len, flags);
+ src, dest, len, flags,
+ HIDMA_TRE_MEMCPY);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ value, dest, len, flags,
+ HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, irqflags);
@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
if (WARN_ON(!pdev->dev.dma_mask)) {
rc = -ENXIO;
goto dmafree;
@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->dev_trca = trca;
dmadev->trca_resource = trca_resource;
dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
dmadev->ddev.device_tx_status = hidma_tx_status;
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 41e0aa283828..5f9966e82c0b 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -28,6 +28,11 @@
#define HIDMA_TRE_DEST_LOW_IDX 4
#define HIDMA_TRE_DEST_HI_IDX 5
+enum tre_type {
+ HIDMA_TRE_MEMCPY = 3,
+ HIDMA_TRE_MEMSET = 4,
+};
+
struct hidma_tre {
atomic_t allocated; /* if this channel is allocated */
bool queued; /* flag whether this is pending */
@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
- dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 1530a661518d..4999e266b2de 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -105,10 +105,6 @@ enum ch_state {
HIDMA_CH_STOPPED = 4,
};
-enum tre_type {
- HIDMA_TRE_MEMCPY = 3,
-};
-
enum err_code {
HIDMA_EVRE_STATUS_COMPLETE = 1,
HIDMA_EVRE_STATUS_ERROR = 4,
@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
tre->err_info = 0;
tre->lldev = lldev;
tre_local = &tre->tre_local[0];
- tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
- tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+ tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */
*tre_ch = i;
if (callback)
@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len,
- u32 flags)
+ u32 flags, u32 txntype)
{
struct hidma_tre *tre;
u32 *tre_local;
@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
}
tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
+ tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
tre_local[HIDMA_TRE_LEN_IDX] = len;
tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
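
Read together with the hidma_ll_request() hunk above, the TRE config word layout comes out as: transaction type in bits 7:0 (now programmed per transfer, MEMCPY=3 or MEMSET=4), channel index in bits 15:8, and the IEOB interrupt bit at bit 16. A standalone sketch of that packing, with field positions inferred from the masks used above:

#include <stdint.h>

static uint32_t tre_cfg;	/* sketch only; field layout inferred from the hunks */

static void request_time_setup(uint32_t chidx)
{
	tre_cfg = (chidx & 0xffu) << 8;	/* channel index, bits 15:8 */
	tre_cfg |= 1u << 16;		/* IEOB: interrupt at end of block */
}

static void per_transfer_setup(uint32_t txntype)
{
	tre_cfg &= ~0xffu;		/* clear the previous transaction type */
	tre_cfg |= txntype & 0xffu;	/* HIDMA_TRE_MEMCPY=3 / HIDMA_TRE_MEMSET=4 */
}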
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 5a0991bc4787..7335e2eb9b72 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -28,7 +28,7 @@
#include "hidma_mgmt.h"
-#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_QOS_N_OFFSET 0x700
#define HIDMA_CFG_OFFSET 0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
#define HIDMA_MAX_XACTIONS_OFFSET 0x420
@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
goto out;
}
- if (max_write_request) {
+ if (max_write_request &&
+ (max_write_request != mgmtdev->max_write_request)) {
dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
max_write_request);
mgmtdev->max_write_request = max_write_request;
@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
goto out;
}
- if (max_read_request) {
+ if (max_read_request &&
+ (max_read_request != mgmtdev->max_read_request)) {
dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
max_read_request);
mgmtdev->max_read_request = max_read_request;
@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-write-transactions missing\n");
goto out;
}
- if (max_wr_xactions) {
+ if (max_wr_xactions &&
+ (max_wr_xactions != mgmtdev->max_wr_xactions)) {
dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
max_wr_xactions);
mgmtdev->max_wr_xactions = max_wr_xactions;
@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-transactions missing\n");
goto out;
}
- if (max_rd_xactions) {
+ if (max_rd_xactions &&
+ (max_rd_xactions != mgmtdev->max_rd_xactions)) {
dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
max_rd_xactions);
mgmtdev->max_rd_xactions = max_rd_xactions;
@@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
struct platform_device_info pdevinfo;
struct of_phandle_args out_irq;
struct device_node *child;
- struct resource *res;
+ struct resource *res = NULL;
const __be32 *cell;
int ret = 0, size, i, num;
u64 addr, addr_size;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index ffcadca53243..2b2c7db3e480 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1690,6 +1690,15 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
if (!irqname)
return -ENOMEM;
+ /*
+ * Initialize the DMA engine channel and add it to the DMA engine
+ * channels list.
+ */
+ chan->device = &dmac->engine;
+ dma_cookie_init(chan);
+
+ list_add_tail(&chan->device_node, &dmac->engine.channels);
+
ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
rcar_dmac_isr_channel,
rcar_dmac_isr_channel_thread, 0,
@@ -1700,15 +1709,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return ret;
}
- /*
- * Initialize the DMA engine channel and add it to the DMA engine
- * channels list.
- */
- chan->device = &dmac->engine;
- dma_cookie_init(chan);
-
- list_add_tail(&chan->device_node, &dmac->engine.channels);
-
return 0;
}
@@ -1794,14 +1794,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
- ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
- irqname, dmac);
- if (ret) {
- dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
- irq, ret);
- return ret;
- }
-
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -1818,8 +1810,32 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
- /* Initialize the channels. */
- INIT_LIST_HEAD(&dmac->engine.channels);
+ /* Initialize engine */
+ engine = &dmac->engine;
+
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+ engine->dev = &pdev->dev;
+ engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
+
+ engine->src_addr_widths = widths;
+ engine->dst_addr_widths = widths;
+ engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
+ engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
+ engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
+ engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
+ engine->device_config = rcar_dmac_device_config;
+ engine->device_terminate_all = rcar_dmac_chan_terminate_all;
+ engine->device_tx_status = rcar_dmac_tx_status;
+ engine->device_issue_pending = rcar_dmac_issue_pending;
+ engine->device_synchronize = rcar_dmac_device_synchronize;
+
+ INIT_LIST_HEAD(&engine->channels);
for (i = 0; i < dmac->n_channels; ++i) {
ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
@@ -1828,6 +1844,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
goto error;
}
+ ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
+ irqname, dmac);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
/* Register the DMAC as a DMA provider for DT. */
ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
NULL);
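
The reordering in this file follows one rule: everything the interrupt handlers touch must be initialized before the handlers can fire. Channel state is therefore set up before devm_request_threaded_irq(), the whole dma_device (capabilities, callbacks, channel list head) is filled in before the per-channel probes, and the error IRQ is only requested once all channels exist.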
@@ -1839,29 +1863,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
*
* Default transfer size of 32 bytes requires 32-byte alignment.
*/
- engine = &dmac->engine;
- dma_cap_set(DMA_MEMCPY, engine->cap_mask);
- dma_cap_set(DMA_SLAVE, engine->cap_mask);
-
- engine->dev = &pdev->dev;
- engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
-
- engine->src_addr_widths = widths;
- engine->dst_addr_widths = widths;
- engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
- engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
- engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
- engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
- engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
- engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
- engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
- engine->device_config = rcar_dmac_device_config;
- engine->device_terminate_all = rcar_dmac_chan_terminate_all;
- engine->device_tx_status = rcar_dmac_tx_status;
- engine->device_issue_pending = rcar_dmac_issue_pending;
- engine->device_synchronize = rcar_dmac_device_synchronize;
-
ret = dma_async_device_register(engine);
if (ret < 0)
goto error;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c3052fbfd092..c2b089af0420 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -79,7 +79,7 @@ static int dma40_memcpy_channels[] = {
};
/* Default configuration for physical memcpy */
-static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
.dir = DMA_MEM_TO_MEM,
@@ -93,7 +93,7 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
};
/* Default configuration for logical memcpy */
-static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = DMA_MEM_TO_MEM,
@@ -2485,19 +2485,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
}
static struct dma_async_tx_descriptor *
-d40_prep_memcpy_sg(struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long dma_flags)
-{
- if (dst_nents != src_nents)
- return NULL;
-
- return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
- DMA_MEM_TO_MEM, dma_flags);
-}
-
-static struct dma_async_tx_descriptor *
d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long dma_flags, void *context)
@@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
}
- if (dma_has_cap(DMA_SG, dev->cap_mask))
- dev->device_prep_dma_sg = d40_prep_memcpy_sg;
-
if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
@@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
d40_ops_init(base, &base->dma_memcpy);
@@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
- dma_cap_set(DMA_SG, base->dma_both.cap_mask);
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index a2358780ab2c..bcd496edc70f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -101,6 +101,17 @@ struct sun6i_dma_config {
u32 nr_max_channels;
u32 nr_max_requests;
u32 nr_max_vchans;
+ /*
+ * In the datasheets/user manuals of newer Allwinner SoCs, a special
+ * bit (bit 2 at register 0x20) is present.
+ * It's named "DMA MCLK interface circuit auto gating bit" in the
+ * documents, and the footnote of this register says that this bit
+ * should be set up when initializing the DMA controller.
+ * Allwinner A23/A33 user manuals do not have this bit documented,
+ * however these SoCs really have and need this bit, as seen in the
+ * BSP kernel source code.
+ */
+ bool gate_needed;
};
/*
@@ -1009,6 +1020,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 24,
.nr_max_vchans = 37,
+ .gate_needed = true,
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
@@ -1028,11 +1040,24 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
.nr_max_vchans = 34,
};
+/*
+ * The V3s have only 8 physical channels, a maximum DRQ port id of 23,
+ * and a total of 24 usable source and destination endpoints.
+ */
+
+static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
+ .nr_max_channels = 8,
+ .nr_max_requests = 23,
+ .nr_max_vchans = 24,
+ .gate_needed = true,
+};
+
static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
+ { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
@@ -1174,13 +1199,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
- /*
- * sun8i variant requires us to toggle a dma gating register,
- * as seen in Allwinner's SDK. This register is not documented
- * in the A23 user manual.
- */
- if (of_device_is_compatible(pdev->dev.of_node,
- "allwinner,sun8i-a23-dma"))
+ if (sdc->cfg->gate_needed)
writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
return 0;
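
Moving the quirk into sun6i_dma_config means supporting another gated SoC is just a data change; a hypothetical example of what a new entry might look like (values illustrative only):

static struct sun6i_dma_config sun8i_new_soc_dma_cfg = {
	.nr_max_channels = 8,	/* illustrative values */
	.nr_max_requests = 24,
	.nr_max_vchans = 34,
	.gate_needed = true,	/* SoC needs the MCLK auto-gating bit set */
};

plus a matching { .compatible = ..., .data = &sun8i_new_soc_dma_cfg } line in sun6i_dma_match.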
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..b26256f23d67 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
- if (tdc->irq < 0) {
- ret = tdc->irq;
+ if (tdc->irq <= 0) {
+ ret = tdc->irq ?: -ENXIO;
goto irq_dispose;
}
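
of_irq_get() returns a negative errno on failure but can also return 0 when the interrupt mapping cannot be created, and 0 is not a valid Linux IRQ number here; the widened check converts that case to -ENXIO instead of treating it as success.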
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2403475a37cf..2f65a8fde21d 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -308,7 +308,7 @@ static const struct of_device_id ti_dra7_master_match[] = {
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
- clear_bit(offset + (len - 1), p);
+ set_bit(offset + (len - 1), p);
}
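
ti_dra7_xbar_reserve() marks crossbar routes that must never be handed out; since the allocator grants routes whose bits are clear in the in-use map, reserving them requires set_bit(). The original clear_bit() did the opposite and left the reserved routes available for allocation.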
static int ti_dra7_xbar_probe(struct platform_device *pdev)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 8b693b712d0f..1d5988849aa6 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -391,11 +391,6 @@ static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
*paddr += nbytes;
}
-static void xgene_dma_invalidate_buffer(__le64 *ext8)
-{
- *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
-}
-
static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
switch (idx) {
@@ -425,48 +420,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
XGENE_DMA_DESC_HOENQ_NUM_POS);
}
-static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
- struct xgene_dma_desc_sw *desc_sw,
- dma_addr_t dst, dma_addr_t src,
- size_t len)
-{
- struct xgene_dma_desc_hw *desc1, *desc2;
- int i;
-
- /* Get 1st descriptor */
- desc1 = &desc_sw->desc1;
- xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
-
- /* Set destination address */
- desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
- desc1->m3 |= cpu_to_le64(dst);
-
- /* Set 1st source address */
- xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
-
- if (!len)
- return;
-
- /*
- * We need to split this source buffer,
- * and need to use 2nd descriptor
- */
- desc2 = &desc_sw->desc2;
- desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
-
- /* Set 2nd to 5th source address */
- for (i = 0; i < 4 && len; i++)
- xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
- &len, &src);
-
- /* Invalidate unused source address field */
- for (; i < 4; i++)
- xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
-
- /* Updated flag that we have prepared 64B descriptor */
- desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
-}
-
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
struct xgene_dma_desc_sw *desc_sw,
dma_addr_t *dst, dma_addr_t *src,
@@ -891,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
}
-static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- u32 dst_nents, struct scatterlist *src_sg,
- u32 src_nents, unsigned long flags)
-{
- struct xgene_dma_desc_sw *first = NULL, *new = NULL;
- struct xgene_dma_chan *chan;
- size_t dst_avail, src_avail;
- dma_addr_t dst, src;
- size_t len;
-
- if (unlikely(!dchan))
- return NULL;
-
- if (unlikely(!dst_nents || !src_nents))
- return NULL;
-
- if (unlikely(!dst_sg || !src_sg))
- return NULL;
-
- chan = to_dma_chan(dchan);
-
- /* Get prepared for the loop */
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
- dst_nents--;
- src_nents--;
-
- /* Run until we are out of scatterlist entries */
- while (true) {
- /* Create the largest transaction possible */
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
- if (len == 0)
- goto fetch;
-
- dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
- src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
- /* Allocate the link descriptor from DMA pool */
- new = xgene_dma_alloc_descriptor(chan);
- if (!new)
- goto fail;
-
- /* Prepare DMA descriptor */
- xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
-
- if (!first)
- first = new;
-
- new->tx.cookie = 0;
- async_tx_ack(&new->tx);
-
- /* update metadata */
- dst_avail -= len;
- src_avail -= len;
-
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->tx_list);
-
-fetch:
- /* fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- /* no more entries: we're done */
- if (dst_nents == 0)
- break;
-
- /* fetch the next entry: if there are no more: done */
- dst_sg = sg_next(dst_sg);
- if (!dst_sg)
- break;
-
- dst_nents--;
- dst_avail = sg_dma_len(dst_sg);
- }
-
- /* fetch the next src scatterlist entry */
- if (src_avail == 0) {
- /* no more entries: we're done */
- if (src_nents == 0)
- break;
-
- /* fetch the next entry: if there are no more: done */
- src_sg = sg_next(src_sg);
- if (!src_sg)
- break;
-
- src_nents--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- if (!new)
- return NULL;
-
- new->tx.flags = flags; /* client is in control of this ack */
- new->tx.cookie = -EBUSY;
- list_splice(&first->tx_list, &new->tx_list);
-
- return &new->tx;
-fail:
- if (!first)
- return NULL;
-
- xgene_dma_free_desc_list(chan, &first->tx_list);
- return NULL;
-}
-
static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
u32 src_cnt, size_t len, unsigned long flags)
@@ -1653,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_cap_zero(dma_dev->cap_mask);
/* Set DMA device capability */
- dma_cap_set(DMA_SG, dma_dev->cap_mask);
/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
* and channel 1 supports XOR, PQ both. First thing here is we have
@@ -1679,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
dma_dev->device_issue_pending = xgene_dma_issue_pending;
dma_dev->device_tx_status = xgene_dma_tx_status;
- dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
@@ -1731,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
/* DMA capability info */
dev_info(pdma->dev,
- "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
- dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+ "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8cf87b1a284b..8722bcba489d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2124,7 +2124,7 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
return err;
}
@@ -2142,25 +2142,25 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*tx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_axiclk;
}
err = clk_prepare_enable(*rx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txclk;
}
err = clk_prepare_enable(*sg_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
goto err_disable_rxclk;
}
@@ -2189,26 +2189,26 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
return err;
}
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
if (IS_ERR(*dev_clk)) {
err = PTR_ERR(*dev_clk);
- dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*dev_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
goto err_disable_axiclk;
}
@@ -2229,7 +2229,7 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
return err;
}
@@ -2251,31 +2251,31 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*tx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_axiclk;
}
err = clk_prepare_enable(*txs_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
goto err_disable_txclk;
}
err = clk_prepare_enable(*rx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txsclk;
}
err = clk_prepare_enable(*rxs_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
goto err_disable_rxclk;
}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 47f64192d2fd..1ee1241ca797 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -830,98 +830,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
}
/**
- * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct zynqmp_dma_desc_sw *new, *first = NULL;
- struct zynqmp_dma_chan *chan = to_chan(dchan);
- void *desc = NULL, *prev = NULL;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
- u32 desc_cnt = 0, i;
- struct scatterlist *sg;
-
- for_each_sg(src_sg, sg, src_sg_len, i)
- desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
- ZYNQMP_DMA_MAX_TRANS_LEN);
-
- spin_lock_bh(&chan->lock);
- if (desc_cnt > chan->desc_free_cnt) {
- spin_unlock_bh(&chan->lock);
- dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
- return NULL;
- }
- chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
- spin_unlock_bh(&chan->lock);
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
-
- /* Run until we are out of scatterlist entries */
- while (true) {
- /* Allocate and populate the descriptor */
- new = zynqmp_dma_get_descriptor(chan);
- desc = (struct zynqmp_dma_desc_ll *)new->src_v;
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
- if (len == 0)
- goto fetch;
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
-
- zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
- len, prev);
- prev = desc;
- dst_avail -= len;
- src_avail -= len;
-
- if (!first)
- first = new;
- else
- list_add_tail(&new->node, &first->tx_list);
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- zynqmp_dma_desc_config_eod(chan, desc);
- first->async_tx.flags = flags;
- return &first->async_tx;
-}
-
-/**
* zynqmp_dma_chan_remove - Channel remove function
* @chan: ZynqMP DMA channel pointer
*/
@@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&zdev->common.channels);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
- dma_cap_set(DMA_SG, zdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
- p->device_prep_dma_sg = zynqmp_dma_prep_sg;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_issue_pending = zynqmp_dma_issue_pending;
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index db75d4b614f7..346c4987b284 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -38,7 +38,6 @@
#include "edac_module.h"
#define EDAC_MOD_STR "altera_edac"
-#define EDAC_VERSION "1"
#define EDAC_DEVICE "Altera"
static const struct altr_sdram_prv_data c5_data = {
@@ -392,7 +391,6 @@ static int altr_sdram_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_VERSION;
mci->ctl_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
mci->dev_name = dev_name(&pdev->dev);
@@ -749,8 +747,10 @@ static int altr_edac_device_probe(struct platform_device *pdev)
drvdata->edac_dev_name = ecc_name;
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (!drvdata->base)
+ if (!drvdata->base) {
+ res = -ENOMEM;
goto fail1;
+ }
/* Get driver specific data for this EDAC device */
drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3aea55698165..ac2f30295efe 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3130,7 +3130,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index a7450275ad28..9c6e326b4c14 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -19,7 +19,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define AMD76X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "amd76x_edac"
#define amd76x_printk(level, fmt, arg...) \
@@ -263,7 +262,6 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = ems_mode ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = amd76x_check;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 837b62c4993d..2c98e020df05 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -618,7 +618,7 @@ static u32 cpc925_cpu_mask_disabled(void)
}
if (reg == NULL || *reg > 2) {
- cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
+ cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
}
@@ -999,7 +999,6 @@ static int cpc925_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = CPC925_EDAC_MOD_STR;
- mci->mod_ver = CPC925_EDAC_REVISION;
mci->ctl_name = pdev->name;
if (edac_op_state == EDAC_OPSTATE_POLL)
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 1a352cae1f52..b5de9a13ea3f 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -26,7 +26,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define E752X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e752x_edac"
static int report_non_memory_errors;
@@ -1303,7 +1302,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E752X_REVISION;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 67ef07aed923..75d7ce62b3be 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -32,7 +32,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define E7XXX_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e7xxx_edac"
#define e7xxx_printk(level, fmt, arg...) \
@@ -458,7 +457,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E7XXX_REVISION;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
pvt = (struct e7xxx_pvt *)mci->pvt_info;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 445862dac273..e4fcfa84fbd3 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -287,7 +287,7 @@ static struct attribute *csrow_attrs[] = {
NULL,
};
-static struct attribute_group csrow_attr_grp = {
+static const struct attribute_group csrow_attr_grp = {
.attrs = csrow_attrs,
};
@@ -304,7 +304,7 @@ static void csrow_attr_release(struct device *dev)
kfree(csrow);
}
-static struct device_type csrow_attr_type = {
+static const struct device_type csrow_attr_type = {
.groups = csrow_attr_groups,
.release = csrow_attr_release,
};
@@ -627,7 +627,7 @@ static struct attribute *dimm_attrs[] = {
NULL,
};
-static struct attribute_group dimm_attr_grp = {
+static const struct attribute_group dimm_attr_grp = {
.attrs = dimm_attrs,
};
@@ -644,7 +644,7 @@ static void dimm_attr_release(struct device *dev)
kfree(dimm);
}
-static struct device_type dimm_attr_type = {
+static const struct device_type dimm_attr_type = {
.groups = dimm_attr_groups,
.release = dimm_attr_release,
};
@@ -902,7 +902,7 @@ static umode_t mci_attr_is_visible(struct kobject *kobj,
return mode;
}
-static struct attribute_group mci_attr_grp = {
+static const struct attribute_group mci_attr_grp = {
.attrs = mci_attrs,
.is_visible = mci_attr_is_visible,
};
@@ -920,7 +920,7 @@ static void mci_attr_release(struct device *dev)
kfree(mci);
}
-static struct device_type mci_attr_type = {
+static const struct device_type mci_attr_type = {
.groups = mci_attr_groups,
.release = mci_attr_release,
};
@@ -1074,7 +1074,7 @@ static void mc_attr_release(struct device *dev)
kfree(dev);
}
-static struct device_type mc_attr_type = {
+static const struct device_type mc_attr_type = {
.release = mc_attr_release,
};
/*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 4e61a6229dd2..6f80eb65c26c 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -17,8 +17,6 @@
#include "edac_module.h"
#include <ras/ras_event.h>
-#define GHES_EDAC_REVISION " Ver: 1.0.0"
-
struct ghes_edac_pvt {
struct list_head list;
struct ghes *ghes;
@@ -451,7 +449,6 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "ghes_edac.c";
- mci->mod_ver = GHES_EDAC_REVISION;
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 0e7e0a404d89..6092e61be605 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -224,7 +224,6 @@ static int highbank_mc_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = pdev->dev.driver->name;
- mci->mod_ver = "1";
mci->ctl_name = id->compatible;
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5306240570d7..8085a32ec3bd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -16,8 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I3000_REVISION "1.1"
-
#define EDAC_MOD_STR "i3000_edac"
#define I3000_RANKS 8
@@ -375,7 +373,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3000_REVISION;
mci->ctl_name = i3000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3000_check;
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 77c58d201a30..d92d56cee101 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -17,8 +17,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
-#define I3200_REVISION "1.1"
-
#define EDAC_MOD_STR "i3200_edac"
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
@@ -375,7 +373,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3200_REVISION;
mci->ctl_name = i3200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3200_check;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 8f5a56e25bd2..53f24b18cd61 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1430,7 +1430,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5000_edac.c";
- mci->mod_ver = I5000_REVISION;
mci->ctl_name = i5000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index a8334c4acea7..b506eef6b146 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1108,7 +1108,6 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "i5100_edac.c";
- mci->mod_ver = "not versioned";
mci->ctl_name = "i5100";
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index cd889edc8516..6f8bcdb9256a 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1315,7 +1315,6 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5400_edac.c";
- mci->mod_ver = I5400_REVISION;
mci->ctl_name = i5400_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index e391f5a716be..6b5a554ba8e4 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1077,7 +1077,6 @@ static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7300_edac.c";
- mci->mod_ver = I7300_REVISION;
mci->ctl_name = i7300_devs[0].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 75ad847593b7..c16c3b931b3d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1079,7 +1079,7 @@ static struct attribute *i7core_addrmatch_attrs[] = {
NULL
};
-static struct attribute_group addrmatch_grp = {
+static const struct attribute_group addrmatch_grp = {
.attrs = i7core_addrmatch_attrs,
};
@@ -1094,7 +1094,7 @@ static void addrmatch_release(struct device *device)
kfree(device);
}
-static struct device_type addrmatch_type = {
+static const struct device_type addrmatch_type = {
.groups = addrmatch_groups,
.release = addrmatch_release,
};
@@ -1110,7 +1110,7 @@ static struct attribute *i7core_udimm_counters_attrs[] = {
NULL
};
-static struct attribute_group all_channel_counts_grp = {
+static const struct attribute_group all_channel_counts_grp = {
.attrs = i7core_udimm_counters_attrs,
};
@@ -1125,7 +1125,7 @@ static void all_channel_counts_release(struct device *device)
kfree(device);
}
-static struct device_type all_channel_counts_type = {
+static const struct device_type all_channel_counts_type = {
.groups = all_channel_counts_groups,
.release = all_channel_counts_release,
};
@@ -2159,7 +2159,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->mod_ver = I7CORE_REVISION;
mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
i7core_dev->socket);
mci->dev_name = pci_name(i7core_dev->pdev[0]);
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index cb61a5b7d080..a2ca929e2168 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -31,8 +31,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82443_REVISION "0.1"
-
#define EDAC_MOD_STR "i82443bxgx_edac"
/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
@@ -320,7 +318,6 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
I82443BXGX_EAP_OFFSET_MBE));
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82443_REVISION;
mci->ctl_name = "I82443BXGX";
mci->dev_name = pci_name(pdev);
mci->edac_check = i82443bxgx_edacmc_check;
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 236c813227fc..3e3a80ffb322 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -16,7 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82860_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82860_edac"
#define i82860_printk(level, fmt, arg...) \
@@ -216,7 +215,6 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
/* I"m not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82860_REVISION;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82860_check;
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index e286b7e74c7a..ceac925af38c 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -20,7 +20,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82875P_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82875p_edac"
#define i82875p_printk(level, fmt, arg...) \
@@ -423,7 +422,6 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82875P_REVISION;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 9dcdab28f665..892815eaa97b 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -16,7 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82975X_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i82975x_edac"
#define i82975x_printk(level, fmt, arg...) \
@@ -564,7 +563,6 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82975X_REVISION;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4260579e6901..aac9b9b360b8 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -45,7 +45,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
-#define IE31200_REVISION "1.0"
#define EDAC_MOD_STR "ie31200_edac"
#define ie31200_printk(level, fmt, arg...) \
@@ -420,7 +419,6 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = IE31200_REVISION;
mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = ie31200_check;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 9a2658a256a9..a11a671c7a38 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1,6 +1,8 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/cpu.h>
+
#include "mce_amd.h"
static struct amd_decoder_ops *fam_ops;
@@ -744,7 +746,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned int fam = x86_family(m->cpuid);
int node_id = amd_get_nb_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
@@ -758,7 +760,7 @@ static void decode_mc4_mce(struct mce *m)
/* special handling for DRAM ECCs */
if (xec == 0x0 || xec == 0x8) {
/* no ECCs on F11h */
- if (c->x86 == 0x11)
+ if (fam == 0x11)
goto wrong_mc4_mce;
pr_cont("%s.\n", mc4_mce_desc[xec]);
@@ -779,7 +781,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
- if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
+ if (fam == 0x15 || fam == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -802,11 +804,11 @@ static void decode_mc4_mce(struct mce *m)
static void decode_mc5_mce(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned int fam = x86_family(m->cpuid);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
- if (c->x86 == 0xf || c->x86 == 0x11)
+ if (fam == 0xf || fam == 0x11)
goto wrong_mc5_mce;
pr_emerg(HW_ERR "MC5 Error: ");
@@ -849,7 +851,7 @@ static void decode_mc6_mce(struct mce *m)
}
/* Decode errors according to Scalable MCA specification */
-static void decode_smca_errors(struct mce *m)
+static void decode_smca_error(struct mce *m)
{
struct smca_hwid *hwid;
unsigned int bank_type;
@@ -859,7 +861,7 @@ static void decode_smca_errors(struct mce *m)
if (m->bank >= ARRAY_SIZE(smca_banks))
return;
- if (boot_cpu_data.x86 >= 0x17 && m->bank == 4)
+ if (x86_family(m->cpuid) >= 0x17 && m->bank == 4)
pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
hwid = smca_banks[m->bank].hwid;
@@ -878,12 +880,8 @@ static void decode_smca_errors(struct mce *m)
pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
}
- /*
- * amd_get_nb_id() returns the last level cache id.
- * The last level cache on Fam17h is 1 level below the node.
- */
if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
- decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);
+ decode_dram_ecc(cpu_to_node(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
@@ -915,12 +913,10 @@ static inline void amd_decode_err_code(u16 ec)
*/
static bool amd_filter_mce(struct mce *m)
{
- u8 xec = (m->status >> 16) & 0x1f;
-
/*
* NB GART TLB error reporting is disabled by default.
*/
- if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
+ if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5 && !report_gart_errors)
return true;
return false;
@@ -946,7 +942,7 @@ static int
amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
- struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
+ unsigned int fam = x86_family(m->cpuid);
int ecc;
if (amd_filter_mce(m))
@@ -956,7 +952,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
m->extcpu,
- c->x86, c->x86_model, c->x86_mask,
+ fam, x86_model(m->cpuid), x86_stepping(m->cpuid),
m->bank,
((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
((m->status & MCI_STATUS_UC) ? "UE" :
@@ -965,11 +961,11 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 >= 0x15) {
+ if (fam >= 0x15) {
pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
/* F15h, bank4, bit 43 is part of McaStatSubCache. */
- if (c->x86 != 0x15 || m->bank != 4)
+ if (fam != 0x15 || m->bank != 4)
pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
}
@@ -1002,7 +998,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_cont("\n");
- decode_smca_errors(m);
+ decode_smca_error(m);
goto err_code;
}
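
The mce_amd rework above stops consulting boot_cpu_data and instead derives family/model/stepping from the CPUID signature carried in the MCE record itself (m->cpuid), so decoding stays correct for errors logged on CPUs other than the boot CPU. As a rough sketch of what the <asm/cpu.h> helpers compute from such a signature (an illustration of the usual CPUID signature layout, not the kernel's exact code):

/* Sketch: decoding an x86 CPUID signature such as mce.cpuid. */
static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;		/* base family */

	if (fam == 0xf)					/* extended family applies */
		fam += (sig >> 20) & 0xff;
	return fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int model = (sig >> 4) & 0xf;		/* base model */

	if (sig_family(sig) >= 0x6)			/* extended model applies */
		model += ((sig >> 16) & 0xf) << 4;
	return model;
}

/* the stepping is simply the low nibble: sig & 0xf */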
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index d3650df94fe8..ec5d695bbb72 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -766,7 +766,6 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = MV64x60_REVISION;
mci->ctl_name = mv64x60_ctl_name;
if (edac_op_state == EDAC_OPSTATE_POLL)
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 8e599490f6de..4395c84cdcbf 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -129,42 +129,72 @@ static struct mem_ctl_info *pnd2_mci;
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s) ((u64)(val) << (s))
-#ifdef CONFIG_X86_INTEL_SBI_APL
-#include "linux/platform_data/sbi_apl.h"
-static int sbi_send(int port, int off, int op, u32 *data)
+/*
+ * On Apollo Lake we access memory controller registers via a
+ * side-band mailbox style interface in a hidden PCI device
+ * configuration space.
+ */
+static struct pci_bus *p2sb_bus;
+#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
+#define P2SB_ADDR_OFF 0xd0
+#define P2SB_DATA_OFF 0xd4
+#define P2SB_STAT_OFF 0xd8
+#define P2SB_ROUT_OFF 0xda
+#define P2SB_EADD_OFF 0xdc
+#define P2SB_HIDE_OFF 0xe1
+
+#define P2SB_BUSY 1
+
+#define P2SB_READ(size, off, ptr) \
+ pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
+#define P2SB_WRITE(size, off, val) \
+ pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
+
+static bool p2sb_is_busy(u16 *status)
{
- struct sbi_apl_message sbi_arg;
- int ret, read = 0;
+ P2SB_READ(word, P2SB_STAT_OFF, status);
- memset(&sbi_arg, 0, sizeof(sbi_arg));
+ return !!(*status & P2SB_BUSY);
+}
- if (op == 0 || op == 4 || op == 6)
- read = 1;
- else
- sbi_arg.data = *data;
+static int _apl_rd_reg(int port, int off, int op, u32 *data)
+{
+ int retries = 0xff, ret;
+ u16 status;
+ u8 hidden;
+
+ /* Unhide the P2SB device, if it's hidden */
+ P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
+ if (hidden)
+ P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
+
+ if (p2sb_is_busy(&status)) {
+ ret = -EAGAIN;
+ goto out;
+ }
- sbi_arg.opcode = op;
- sbi_arg.port_address = port;
- sbi_arg.register_offset = off;
- ret = sbi_apl_commit(&sbi_arg);
- if (ret || sbi_arg.status)
- edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
- sbi_arg.status, ret, sbi_arg.data);
+ P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
+ P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
+ P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
+ P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
+ P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
- if (ret == 0)
- ret = sbi_arg.status;
+ while (p2sb_is_busy(&status)) {
+ if (retries-- == 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
- if (ret == 0 && read)
- *data = sbi_arg.data;
+ P2SB_READ(dword, P2SB_DATA_OFF, data);
+ ret = (status >> 1) & 0x3;
+out:
+ /* Hide the P2SB device, if it was hidden before */
+ if (hidden)
+ P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
return ret;
}
-#else
-static int sbi_send(int port, int off, int op, u32 *data)
-{
- return -EUNATCH;
-}
-#endif
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
@@ -173,10 +203,10 @@ static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
switch (sz) {
case 8:
- ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
+ ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
/* fall through */
case 4:
- ret |= sbi_send(port, off, op, (u32 *)data);
+ ret |= _apl_rd_reg(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
break;
@@ -212,11 +242,23 @@ static u64 get_sideband_reg_base_addr(void)
{
struct pci_dev *pdev;
u32 hi, lo;
+ u8 hidden;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
if (pdev) {
+ /* Unhide the P2SB device, if it's hidden */
+ pci_read_config_byte(pdev, 0xe1, &hidden);
+ if (hidden)
+ pci_write_config_byte(pdev, 0xe1, 0);
+
pci_read_config_dword(pdev, 0x10, &lo);
pci_read_config_dword(pdev, 0x14, &hi);
+ lo &= 0xfffffff0;
+
+ /* Hide the P2SB device, if it was hidden before */
+ if (hidden)
+ pci_write_config_byte(pdev, 0xe1, hidden);
+
pci_dev_put(pdev);
return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
} else {
@@ -1515,6 +1557,12 @@ static int __init pnd2_init(void)
ops = (struct dunit_ops *)id->driver_data;
+ if (ops->type == APL) {
+ p2sb_bus = pci_find_bus(0, 0);
+ if (!p2sb_bus)
+ return -ENODEV;
+ }
+
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
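
With the out-of-tree sbi_apl mailbox gone, pnd2_edac above talks to the hidden P2SB device directly: unhide it, write the target port/offset, kick off the transaction with the BUSY bit, poll until BUSY clears, read the data, then re-hide the device. A hypothetical caller of apl_rd_reg() might look like this; the port, offset, and opcode values below are made up for illustration, the real ones come from the d-unit register tables elsewhere in the driver:

	u64 reg64;
	u32 reg32;
	int ret;

	/* 64-bit read: two 32-bit side-band transactions, high dword first */
	ret = apl_rd_reg(0x24 /* hypothetical port */,
			 0x08 /* hypothetical offset */,
			 0x06 /* hypothetical read opcode */,
			 &reg64, sizeof(reg64), "DREG64");
	if (ret)
		pr_warn("pnd2: side-band read failed: %d\n", ret);

	/* 32-bit read: a single transaction */
	ret = apl_rd_reg(0x24, 0x10, 0x06, &reg32, sizeof(reg32), "DREG32");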
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index e55e92590106..fd3202c30f69 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1063,7 +1063,6 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
/* Initialize strings */
mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION;
mci->ctl_name = ppc4xx_edac_match->compatible,
mci->dev_name = np->full_name;
@@ -1267,8 +1266,8 @@ static int ppc4xx_edac_probe(struct platform_device *op)
memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
- ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
- "ECC is disabled.\n", np->full_name);
+ ppc4xx_edac_printk(KERN_INFO, "%pOF: No ECC memory detected or "
+ "ECC is disabled.\n", np);
status = -ENODEV;
goto done;
}
@@ -1287,9 +1286,9 @@ static int ppc4xx_edac_probe(struct platform_device *op)
mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
- ppc4xx_edac_printk(KERN_ERR, "%s: "
+ ppc4xx_edac_printk(KERN_ERR, "%pOF: "
"Failed to allocate EDAC MC instance!\n",
- np->full_name);
+ np);
status = -ENOMEM;
goto done;
}
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 978916625ced..851e53e122aa 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -22,7 +22,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define R82600_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "r82600_edac"
#define r82600_printk(level, fmt, arg...) \
@@ -316,7 +315,6 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = R82600_REVISION;
mci->ctl_name = "R82600";
mci->dev_name = pci_name(pdev);
mci->edac_check = r82600_check;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 80d860cb0746..dc0591654011 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -300,6 +300,12 @@ enum domain {
SOCK,
};
+enum mirroring_mode {
+ NON_MIRRORING,
+ ADDR_RANGE_MIRRORING,
+ FULL_MIRRORING,
+};
+
struct sbridge_pvt;
struct sbridge_info {
enum type type;
@@ -377,8 +383,9 @@ struct sbridge_pvt {
struct sbridge_channel channel[NUM_CHANNELS];
/* Memory type detection */
- bool is_mirrored, is_lockstep, is_close_pg;
+ bool is_cur_addr_mirrored, is_lockstep, is_close_pg;
bool is_chan_hash;
+ enum mirroring_mode mirror_mode;
/* Memory description */
u64 tolm, tohm;
@@ -1648,10 +1655,6 @@ static int get_dimm_config(struct mem_ctl_info *mci)
enum edac_type mode;
u32 reg;
- if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
- pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
- pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
- }
pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
pvt->sbridge_dev->mc,
@@ -1663,22 +1666,45 @@ static int get_dimm_config(struct mem_ctl_info *mci)
*/
if (pvt->info.type == KNIGHTS_LANDING) {
mode = EDAC_S4ECD4ED;
- pvt->is_mirrored = false;
+ pvt->mirror_mode = NON_MIRRORING;
+ pvt->is_cur_addr_mirrored = false;
if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
return -1;
- pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr);
+ if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
+ edac_dbg(0, "Failed to read KNL_MCMTR register\n");
+ return -ENODEV;
+ }
} else {
- pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
+ edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
+ return -ENODEV;
+ }
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ if (GET_BITFIELD(reg, 28, 28)) {
+ pvt->mirror_mode = ADDR_RANGE_MIRRORING;
+ edac_dbg(0, "Address range partial memory mirroring is enabled\n");
+ goto next;
+ }
+ }
+ if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
+ edac_dbg(0, "Failed to read RASENABLES register\n");
+ return -ENODEV;
+ }
if (IS_MIRROR_ENABLED(reg)) {
- edac_dbg(0, "Memory mirror is enabled\n");
- pvt->is_mirrored = true;
+ pvt->mirror_mode = FULL_MIRRORING;
+ edac_dbg(0, "Full memory mirroring is enabled\n");
} else {
- edac_dbg(0, "Memory mirror is disabled\n");
- pvt->is_mirrored = false;
+ pvt->mirror_mode = NON_MIRRORING;
+ edac_dbg(0, "Memory mirroring is disabled\n");
}
- pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+next:
+ if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
+ edac_dbg(0, "Failed to read MCMTR register\n");
+ return -ENODEV;
+ }
if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
edac_dbg(0, "Lockstep is enabled\n");
mode = EDAC_S8ECD8ED;
@@ -2092,7 +2118,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
- if (pvt->is_mirrored) {
+ if (pvt->mirror_mode == FULL_MIRRORING ||
+ (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
*channel_mask |= 1 << ((base_ch + 2) % 4);
switch(ch_way) {
case 2:
@@ -2103,8 +2130,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
sprintf(msg, "Invalid mirror set. Can't decode addr");
return -EINVAL;
}
- } else
+
+ pvt->is_cur_addr_mirrored = true;
+ } else {
sck_xch = (1 << sck_way) * ch_way;
+ pvt->is_cur_addr_mirrored = false;
+ }
if (pvt->is_lockstep)
*channel_mask |= 1 << ((base_ch + 1) % 4);
@@ -2967,7 +2998,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* EDAC core should be handling the channel mask, in order to point
* to the group of dimm's where the error may be happening.
*/
- if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
+ if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
snprintf(msg, sizeof(msg),
@@ -3125,7 +3156,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "sb_edac.c";
- mci->mod_ver = SBRIDGE_REVISION;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 64bef6c9cfb4..16dea97568a1 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,8 +31,6 @@
#include "edac_module.h"
-#define SKX_REVISION " Ver: 1.0 "
-
/*
* Debug macros
*/
@@ -473,7 +471,6 @@ static int skx_register_mci(struct skx_imc *imc)
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "skx_edac.c";
mci->dev_name = pci_name(imc->chan[0].cdev);
- mci->mod_ver = SKX_REVISION;
mci->ctl_page_to_phys = NULL;
rc = skx_get_dimm_config(mci);
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 1c01dec78ec3..0c9c59e2b5a3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -413,7 +413,6 @@ static int synps_edac_mc_init(struct mem_ctl_info *mci,
mci->ctl_name = "synps_ddr_controller";
mci->dev_name = SYNPS_EDAC_MOD_STRING;
mci->mod_name = SYNPS_EDAC_MOD_VER;
- mci->mod_ver = "1";
edac_op_state = EDAC_OPSTATE_POLL;
mci->edac_check = synps_edac_check;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 2d352b40ae1c..f35d87519a3e 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -732,7 +732,6 @@ static int thunderx_lmc_probe(struct pci_dev *pdev,
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "thunderx-lmc";
- mci->mod_ver = "1";
mci->ctl_name = "thunderx-lmc";
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_NONE;
@@ -775,11 +774,10 @@ static int thunderx_lmc_probe(struct pci_dev *pdev,
lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;
- l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node),
- PAGE_SIZE);
-
+ l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
if (!l2c_ioaddr) {
dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
+ ret = -ENOMEM;
goto err_free;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 03c97a4bf590..cc779f3f9e2d 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -18,8 +18,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
-#define X38_REVISION "1.1"
-
#define EDAC_MOD_STR "x38_edac"
#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
@@ -357,7 +355,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = X38_REVISION;
mci->ctl_name = x38_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = x38_check;
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 669246056812..e8b81d7ef61f 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -415,7 +415,6 @@ static int xgene_edac_mc_add(struct xgene_edac *edac, struct device_node *np)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = "0.1";
mci->ctl_page_to_phys = NULL;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = SCRUB_HW_SRC;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6d50071f07d5..a7bca4207f44 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -150,4 +150,11 @@ config EXTCON_USB_GPIO
Say Y here to enable GPIO based USB cable detection extcon support.
Used typically if GPIO is used for USB ID pin detection.
+config EXTCON_USBC_CROS_EC
+ tristate "ChromeOS Embedded Controller EXTCON support"
+ depends on MFD_CROS_EC
+ help
+ Say Y here to enable USB Type-C cable detection extcon support when
+ using Chrome OS EC-based USB Type-C ports.
+
endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index ecfa95804427..a73624e76193 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
+obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index 186fd735eb28..f599aeddf8e5 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -1,5 +1,5 @@
/*
- * drivers/extcon/devres.c - EXTCON device's resource management
+ * drivers/extcon/devres.c - EXTCON device's resource management
*
* Copyright (C) 2016 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -59,10 +59,9 @@ static void devm_extcon_dev_notifier_all_unreg(struct device *dev, void *res)
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev: device owning the extcon device being created
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @dev: the device owning the extcon device being created
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
* This function manages automatically the memory of extcon device using device
* resource management and simplify the control of freeing the memory of extcon
@@ -97,8 +96,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
/**
* devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be freed
*
* Free the memory that is allocated with devm_extcon_dev_allocate()
* function.
@@ -112,10 +111,9 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev: device to allocate extcon device
- * @edev: the new extcon device to register
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be registered
*
- * Managed extcon_dev_register() function. If extcon device is attached with
- * this function, that extcon device is automatically unregistered on driver
+ * Note that an extcon device registered with this function is
+ * automatically unregistered on driver
* detach. Internally this function calls extcon_dev_register() function.
* To get more information, refer that function.
@@ -149,8 +147,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be unregistered
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
@@ -164,10 +162,10 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
/**
* devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -208,10 +206,10 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier()
* - Resource-managed extcon_unregister_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
@@ -225,9 +223,9 @@ EXPORT_SYMBOL(devm_extcon_unregister_notifier);
/**
* devm_extcon_register_notifier_all()
* - Resource-managed extcon_register_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -263,9 +261,9 @@ EXPORT_SYMBOL(devm_extcon_register_notifier_all);
/**
* devm_extcon_unregister_notifier_all()
* - Resource-managed extcon_unregister_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index d9f9afe45961..1a45e745717d 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -171,7 +171,7 @@ static int int3496_remove(struct platform_device *pdev)
return 0;
}
-static struct acpi_device_id int3496_acpi_match[] = {
+static const struct acpi_device_id int3496_acpi_match[] = {
{ "INT3496" },
{ }
};
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 62163468f205..7a5856809047 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -811,9 +811,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
*/
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
- if (!cable_attached)
- extcon_set_state_sync(info->edev,
- EXTCON_DISP_MHL, cable_attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
+ cable_attached);
break;
}
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
new file mode 100644
index 000000000000..598956f1dcae
--- /dev/null
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -0,0 +1,417 @@
+/**
+ * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
+ *
+ * Copyright (C) 2017 Google, Inc
+ * Author: Benson Leung <bleung@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct cros_ec_extcon_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+
+ int port_id;
+
+ struct cros_ec_device *ec;
+
+ struct notifier_block notifier;
+
+ bool dp; /* DisplayPort enabled */
+ bool mux; /* SuperSpeed (usb3) enabled */
+ unsigned int power_type;
+};
+
+static const unsigned int usb_type_c_cable[] = {
+ EXTCON_DISP_DP,
+ EXTCON_NONE,
+};
+
+/**
+ * cros_ec_pd_command() - Send a command to the EC.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @command: EC command
+ * @version: EC command version
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
+ unsigned int command,
+ unsigned int version,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(info->ec, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * cros_ec_usb_get_power_type() - Get power type info about the PD device
+ * attached to a given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: power type on success, <0 on failure.
+ */
+static int cros_ec_usb_get_power_type(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_power_info req;
+ struct ec_response_usb_pd_power_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_POWER_INFO, 0,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.type;
+}
+
+/**
+ * cros_ec_usb_get_pd_mux_state() - Get PD mux state for given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: PD mux state on success, <0 on failure.
+ */
+static int cros_ec_usb_get_pd_mux_state(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_mux_info req;
+ struct ec_response_usb_pd_mux_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_MUX_INFO, 0,
+ &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.flags;
+}
+
+/**
+ * cros_ec_usb_get_role() - Get role info about a possible PD device attached
+ * to a given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @polarity: pointer to cable polarity (return value)
+ *
+ * Return: role info on success, -ENOTCONN if no cable is connected, <0 on
+ * failure.
+ */
+static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
+ bool *polarity)
+{
+ struct ec_params_usb_pd_control pd_control;
+ struct ec_response_usb_pd_control_v1 resp;
+ int ret;
+
+ pd_control.port = info->port_id;
+ pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
+ &pd_control, sizeof(pd_control),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!(resp.enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ return -ENOTCONN;
+
+ *polarity = resp.polarity;
+
+ return resp.role;
+}
+
+/**
+ * cros_ec_pd_get_num_ports() - Get number of EC charge ports.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: number of ports on success, <0 on failure.
+ */
+static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
+{
+ struct ec_response_usb_pd_ports resp;
+ int ret;
+
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_PORTS,
+ 0, NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.num_ports;
+}
+
+static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
+ bool force)
+{
+ struct device *dev = info->dev;
+ int role, power_type;
+ bool polarity = false;
+ bool dp = false;
+ bool mux = false;
+ bool hpd = false;
+
+ power_type = cros_ec_usb_get_power_type(info);
+ if (power_type < 0) {
+ dev_err(dev, "failed getting power type err = %d\n",
+ power_type);
+ return power_type;
+ }
+
+ role = cros_ec_usb_get_role(info, &polarity);
+ if (role < 0) {
+ if (role != -ENOTCONN) {
+ dev_err(dev, "failed getting role err = %d\n", role);
+ return role;
+ }
+ } else {
+ int pd_mux_state;
+
+ pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
+ if (pd_mux_state < 0)
+ pd_mux_state = USB_PD_MUX_USB_ENABLED;
+
+ dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
+ mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
+ hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
+ }
+
+ if (force || info->dp != dp || info->mux != mux ||
+ info->power_type != power_type) {
+
+ info->dp = dp;
+ info->mux = mux;
+ info->power_type = power_type;
+
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+
+ } else if (hpd) {
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+ }
+
+ return 0;
+}
+
+static int extcon_cros_ec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec;
+ u32 host_event;
+
+ info = container_of(nb, struct cros_ec_extcon_info, notifier);
+ ec = info->ec;
+
+ host_event = cros_ec_get_host_event(ec);
+ if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
+ extcon_cros_ec_detect_cable(info, false);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int extcon_cros_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int numports, ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->ec = ec;
+
+ if (np) {
+ u32 port;
+
+ ret = of_property_read_u32(np, "google,usb-port-id", &port);
+ if (ret < 0) {
+ dev_err(dev, "Missing google,usb-port-id property\n");
+ return ret;
+ }
+ info->port_id = port;
+ } else {
+ info->port_id = pdev->id;
+ }
+
+ numports = cros_ec_pd_get_num_ports(info);
+ if (numports < 0) {
+ dev_err(dev, "failed getting number of ports! ret = %d\n",
+ numports);
+ return numports;
+ }
+
+ if (info->port_id >= numports) {
+ dev_err(dev, "This system only supports %d ports\n", numports);
+ return -ENODEV;
+ }
+
+ info->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, info->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD);
+
+ platform_set_drvdata(pdev, info);
+
+ /* Get PD events from the EC */
+ info->notifier.notifier_call = extcon_cros_ec_event;
+ ret = blocking_notifier_chain_register(&info->ec->event_notifier,
+ &info->notifier);
+ if (ret < 0) {
+ dev_err(dev, "failed to register notifier\n");
+ return ret;
+ }
+
+ /* Perform initial detection */
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to detect initial cable state\n");
+ goto unregister_notifier;
+ }
+
+ return 0;
+
+unregister_notifier:
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+ return ret;
+}
+
+static int extcon_cros_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info = platform_get_drvdata(pdev);
+
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int extcon_cros_ec_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int extcon_cros_ec_resume(struct device *dev)
+{
+ int ret;
+ struct cros_ec_extcon_info *info = dev_get_drvdata(dev);
+
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0)
+ dev_err(dev, "failed to detect cable state on resume\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops extcon_cros_ec_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(extcon_cros_ec_suspend, extcon_cros_ec_resume)
+};
+
+#define DEV_PM_OPS (&extcon_cros_ec_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_OF
+static const struct of_device_id extcon_cros_ec_of_match[] = {
+ { .compatible = "google,extcon-usbc-cros-ec" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, extcon_cros_ec_of_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver extcon_cros_ec_driver = {
+ .driver = {
+ .name = "extcon-usbc-cros-ec",
+ .of_match_table = of_match_ptr(extcon_cros_ec_of_match),
+ .pm = DEV_PM_OPS,
+ },
+ .remove = extcon_cros_ec_remove,
+ .probe = extcon_cros_ec_probe,
+};
+
+module_platform_driver(extcon_cros_ec_driver);
+
+MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
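
On the consumer side, a display driver can watch the EXTCON_DISP_DP connector this new driver exports through the standard extcon consumer API (the same devm_extcon_register_notifier()/extcon_get_property() calls documented in the devres.c and extcon.c hunks around this patch). A hypothetical consumer sketch follows; the surrounding driver structure and the "extcon" DT phandle are assumed:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>

static int dp_extcon_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	/* re-query state/properties and (re)train the link here */
	return NOTIFY_OK;
}

static int dp_bind_extcon(struct device *dev, struct notifier_block *nb)
{
	union extcon_property_value hpd;
	struct extcon_dev *edev;
	int ret;

	/* resolved via the consumer's "extcon" phandle in DT */
	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	nb->notifier_call = dp_extcon_event;
	ret = devm_extcon_register_notifier(dev, edev, EXTCON_DISP_DP, nb);
	if (ret)
		return ret;

	if (extcon_get_state(edev, EXTCON_DISP_DP) > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_DISP_HPD, &hpd);
		/* hpd.intval reflects the HPD level reported by the EC */
	}
	return 0;
}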
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 8eccf7b14937..35e9fb885486 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1,7 +1,5 @@
/*
- * drivers/extcon/extcon.c - External Connector (extcon) framework.
- *
- * External connector (extcon) class driver
+ * drivers/extcon/extcon.c - External Connector (extcon) framework.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -37,7 +35,6 @@
#include "extcon.h"
#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
struct __extcon_info {
unsigned int type;
@@ -200,13 +197,13 @@ struct __extcon_info {
};
/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev: The extcon device
- * @cable_index: Index of this cable in the edev
- * @attr_g: Attribute group for the cable
+ * struct extcon_cable - An internal data for an external connector.
+ * @edev: the extcon device
+ * @cable_index: the index of this cable in the edev
+ * @attr_g: the attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
- * @attrs: Array pointing to attr_name and attr_state for attr_g
+ * @attrs: the array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -234,15 +231,6 @@ static struct class *extcon_class;
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
-/**
- * check_mutually_exclusive - Check if new_state violates mutually_exclusive
- * condition.
- * @edev: the extcon device
- * @new_state: new cable attach status for @edev
- *
- * Returns 0 if nothing violates. Returns the index + 1 for the first
- * violated condition.
- */
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
int i = 0;
@@ -417,11 +405,13 @@ static ssize_t cable_state_show(struct device *dev,
}
/**
- * extcon_sync() - Synchronize the states for both the attached/detached
- * @edev: the extcon device that has the cable.
+ * extcon_sync() - Synchronize the state for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ *
+ * Note that this function sends a notification in order to synchronize
+ * the state and property of an external connector.
*
- * This function send a notification to synchronize the all states of a
- * specific external connector
+ * Returns 0 on success or an error number on failure.
*/
int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
@@ -497,9 +487,11 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
EXPORT_SYMBOL_GPL(extcon_sync);
/**
- * extcon_get_state() - Get the state of a external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector in extcon enumeration.
+ * extcon_get_state() - Get the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ *
+ * Returns 0 on success or an error number on failure.
*/
int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
{
@@ -522,20 +514,19 @@ int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
EXPORT_SYMBOL_GPL(extcon_get_state);
/**
- * extcon_set_state() - Set the state of a external connector.
- * without a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state() - Set the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector.
+ * the default semantics is true: attached / false: detached.
+ *
+ * Note that this function sets the state of an external connector without
+ * sending a notification. To synchronize the state of an external connector,
+ * use extcon_set_state_sync() or extcon_sync().
*
- * This function only set the state of a external connector without
- * a notification. To synchronize the data of a external connector,
- * use extcon_set_state_sync() and extcon_sync().
+ * Returns 0 on success or an error number on failure.
*/
-int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state)
{
unsigned long flags;
int index, ret = 0;
@@ -550,11 +541,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the external connector's state is changed. */
- if (!is_extcon_changed(edev, index, cable_state))
+ if (!is_extcon_changed(edev, index, state))
goto out;
if (check_mutually_exclusive(edev,
- (edev->state & ~BIT(index)) | (cable_state & BIT(index)))) {
+ (edev->state & ~BIT(index)) | (state & BIT(index)))) {
ret = -EPERM;
goto out;
}
@@ -563,11 +554,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
* Initialize the value of extcon property before setting
* the detached state for an external connector.
*/
- if (!cable_state)
+ if (!state)
init_property(edev, id, index);
- /* Update the state for a external connector. */
- if (cable_state)
+ /* Update the state for an external connector. */
+ if (state)
edev->state |= BIT(index);
else
edev->state &= ~(BIT(index));
@@ -579,19 +570,18 @@ out:
EXPORT_SYMBOL_GPL(extcon_set_state);
/**
- * extcon_set_state_sync() - Set the state of a external connector
- * with a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state_sync() - Set the state of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector;
+ * the default semantics is true: attached / false: detached
+ *
+ * Note that this function sets the state of an external connector
+ * and synchronizes the state by sending a notification.
*
- * This function set the state of external connector and synchronize the data
- * by usning a notification.
+ * Returns 0 on success or a negative error number on failure.
*/
-int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
int ret, index;
unsigned long flags;
@@ -602,12 +592,12 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
/* Check whether the external connector's state is changed. */
spin_lock_irqsave(&edev->lock, flags);
- ret = is_extcon_changed(edev, index, cable_state);
+ ret = is_extcon_changed(edev, index, state);
spin_unlock_irqrestore(&edev->lock, flags);
if (!ret)
return 0;
- ret = extcon_set_state(edev, id, cable_state);
+ ret = extcon_set_state(edev, id, state);
if (ret < 0)
return ret;
@@ -616,19 +606,18 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_state_sync);
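A hedged provider-side sketch of the attach path: a hypothetical interrupt handler reporting a cable event through extcon_set_state_sync(), which updates the state and notifies consumers in one call. The handler name and the my_read_cable_gpio() helper are illustrative assumptions:

#include <linux/extcon.h>
#include <linux/interrupt.h>

static irqreturn_t my_cable_irq(int irq, void *data)
{
        struct extcon_dev *edev = data;
        bool attached = my_read_cable_gpio();   /* assumed hardware helper */

        /* No-op if the state did not change; notifies consumers otherwise. */
        if (extcon_set_state_sync(edev, EXTCON_USB, attached) < 0)
                pr_err("extcon: failed to update EXTCON_USB state\n");

        return IRQ_HANDLED;
}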
/**
- * extcon_get_property() - Get the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer which store the value of property.
+ * extcon_get_property() - Get the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer which stores the value of the extcon property
*
- * When getting the property value of external connector, the external connector
- * should be attached. If detached state, function just return 0 without
- * property value. Also, the each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when getting the property value of an external connector,
+ * the external connector should be attached. If it is detached, the function
+ * just returns 0 without a property value. Also, each property should be
+ * included in the list of supported properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -698,17 +687,16 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property);
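A minimal sketch of the consumer side, assuming a Type-C capable connector that exposes EXTCON_PROP_USB_TYPEC_POLARITY; per the note above, it only yields a value while the connector is attached:

        union extcon_property_value prop;
        int ret;

        ret = extcon_get_property(edev, EXTCON_USB,
                                  EXTCON_PROP_USB_TYPEC_POLARITY, &prop);
        if (ret == 0)
                pr_info("Type-C polarity: %d\n", prop.intval);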
/**
- * extcon_set_property() - Set the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property() - Set the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer containing the new value of the extcon property
*
- * The each property should be included in the list of supported properties
- * according to the type of external connectors.
+ * Note that each property should be included in the list of supported
+ * properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -766,15 +754,14 @@ int extcon_set_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property);
/**
- * extcon_set_property_sync() - Set the property value of a specific cable
- with a notification.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property_sync() - Set the property of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer containing the new value of the extcon property
*
- * When setting the property value of external connector, the external connector
- * should be attached. The each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when setting the property value of an external connector,
+ * the external connector should be attached. Each property should
+ * be included in the list of supported properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -791,12 +778,11 @@ int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_sync);
/**
- * extcon_get_property_capability() - Get the capability of property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_get_property_capability() - Get the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
* Returns 1 if the property is available or 0 if not available.
*/
@@ -822,18 +808,17 @@ int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property_capability);
/**
- * extcon_set_property_capability() - Set the capability of a property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_set_property_capability() - Set the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
- * This function set the capability of a property for an external connector
- * to mark the bit in capability bitmap which mean the available state of
- * a property.
+ * Note that this function sets the capability of the property
+ * for an external connector by marking the bit in the capability
+ * bitmap which means the property is available.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
unsigned int prop)
@@ -881,8 +866,10 @@ int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_capability);
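On the provider side, capabilities are typically declared once at probe time, before any property is set; a sketch under the same EXTCON_USB assumption as the earlier examples:

        /* Mark the polarity property as available for the USB connector. */
        ret = extcon_set_property_capability(edev, EXTCON_USB,
                                             EXTCON_PROP_USB_TYPEC_POLARITY);
        if (ret < 0)
                return ret;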
/**
- * extcon_get_extcon_dev() - Get the extcon device instance from the name
- * @extcon_name: The extcon name provided with extcon_dev_register()
+ * extcon_get_extcon_dev() - Get the extcon device instance from the name.
+ * @extcon_name: the extcon name provided with extcon_dev_register()
+ *
+ * Returns the extcon device pointer on success or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -904,15 +891,17 @@ out:
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
- * extcon_register_notifier() - Register a notifiee to get notified by
- * any attach status changes from the extcon.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier() - Register a notifier block to get notified by
+ * any state changes from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be registered
*
* Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
+ * the current state of an external connector and the third parameter
+ * is the pointer of the extcon device.
+ *
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -936,10 +925,12 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_register_notifier);
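Given the revised callback semantics (current state in val, edev pointer as data), a consumer sketch might look as follows; the function and variable names are placeholders:

static int my_extcon_event(struct notifier_block *nb,
                           unsigned long state, void *data)
{
        struct extcon_dev *edev = data; /* third parameter: the edev */

        pr_info("connector is now %s\n", state ? "attached" : "detached");
        return NOTIFY_OK;
}

static struct notifier_block my_nb = {
        .notifier_call = my_extcon_event,
};

/* in probe: */
ret = extcon_register_notifier(edev, EXTCON_USB, &my_nb);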
/**
- * extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_unregister_notifier() - Unregister a notifier block from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be unregistered
+ *
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -963,16 +954,16 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
/**
- * extcon_register_notifier_all() - Register a notifier block for all connectors
- * @edev: the extcon device that has the external connector.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier_all() - Register a notifier block for all connectors.
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
- * This function registers a notifier block in order to receive the state
- * change of all supported external connectors from extcon device.
+ * Note that this function registers a notifier block in order to receive
+ * the state change of all supported external connectors from the extcon device.
 * And the second parameter given to the callback of nb (val) is
- * the current state and third parameter is the edev pointer.
+ * the current state and the third parameter is the pointer of the extcon device.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -993,10 +984,10 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier_all);
/**
* extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @edev: the extcon device
+ * @nb: a notifier block to be unregistered
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -1045,15 +1036,14 @@ static void dummy_sysfs_dev_release(struct device *dev)
/*
* extcon_dev_allocate() - Allocate the memory of extcon device.
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
- * This function allocates the memory for extcon device without allocating
- * memory in each extcon provider driver and initialize default setting for
- * extcon device.
+ * Note that this function allocates the memory for an extcon device
+ * and initializes the default settings for it.
*
- * Return the pointer of extcon device if success or ERR_PTR(err) if fail
+ * Returns a pointer to the allocated extcon_dev on success
+ * or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
@@ -1074,7 +1064,7 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
/*
* extcon_dev_free() - Free the memory of extcon device.
- * @edev: the extcon device to free
+ * @edev: the extcon device
*/
void extcon_dev_free(struct extcon_dev *edev)
{
@@ -1083,13 +1073,18 @@ void extcon_dev_free(struct extcon_dev *edev)
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
- * extcon_dev_register() - Register a new extcon device
- * @edev : the new extcon device (should be allocated before calling)
+ * extcon_dev_register() - Register a new extcon device
+ * @edev: the extcon device to be registered
*
 * Among the members of edev struct, please set the "user initializing data"
- * in any case and set the "optional callbacks" if required. However, please
- * do not set the values of "internal data", which are initialized by
- * this function.
+ * but do not set the values of "internal data", which are initialized by
+ * this function.
+ *
+ * Note that before calling this function, the memory of the extcon device
+ * has to be allocated by using extcon_dev_allocate(). And the extcon
+ * device should include the supported_cable information.
+ *
+ * Returns 0 on success or a negative error number on failure.
*/
int extcon_dev_register(struct extcon_dev *edev)
{
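Putting the allocate/register contract above together, a provider's probe path might read like this sketch (teardown on later errors trimmed; the cable array is an assumption for the example):

        static const unsigned int my_cables[] = { EXTCON_USB, EXTCON_NONE };
        struct extcon_dev *edev;
        int ret;

        edev = extcon_dev_allocate(my_cables); /* supplies supported_cable */
        if (IS_ERR(edev))
                return PTR_ERR(edev);

        ret = extcon_dev_register(edev);
        if (ret) {
                extcon_dev_free(edev);
                return ret;
        }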
@@ -1296,7 +1291,7 @@ EXPORT_SYMBOL_GPL(extcon_dev_register);
/**
* extcon_dev_unregister() - Unregister the extcon device.
- * @edev: the extcon device instance to be unregistered.
+ * @edev: the extcon device to be unregistered.
*
* Note that this does not call kfree(edev) because edev was not allocated
* by this class.
@@ -1342,11 +1337,11 @@ EXPORT_SYMBOL_GPL(extcon_dev_unregister);
#ifdef CONFIG_OF
/*
- * extcon_get_edev_by_phandle - Get the extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree.
+ * @dev: the device that has the extcon phandle
+ * @index: the index into the list of extcon devices
*
- * return the instance of extcon device
+ * Returns the extcon device pointer on success or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
@@ -1363,8 +1358,8 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
node = of_parse_phandle(dev->of_node, "extcon", index);
if (!node) {
- dev_dbg(dev, "failed to get phandle in %s node\n",
- dev->of_node->full_name);
+ dev_dbg(dev, "failed to get phandle in %pOF node\n",
+ dev->of_node);
return ERR_PTR(-ENODEV);
}
@@ -1411,8 +1406,6 @@ static void __exit extcon_class_exit(void)
module_exit(extcon_class_exit);
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("External connector (extcon) class driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("External Connector (extcon) framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 8043e51de897..7da9f1b83ebe 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -357,7 +357,7 @@ struct sensor_value {
} __packed;
struct dev_pstate_set {
- u16 dev_id;
+ __le16 dev_id;
u8 pstate;
} __packed;
@@ -965,7 +965,7 @@ static int scpi_probe(struct platform_device *pdev)
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
- dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
+ dev_err(dev, "no mboxes property in '%pOF'\n", np);
return -ENODEV;
}
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 2fe1a130189f..c16600f30611 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -534,7 +534,7 @@ static struct attribute *dcdbas_dev_attrs[] = {
NULL
};
-static struct attribute_group dcdbas_attr_group = {
+static const struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
.bin_attrs = dcdbas_bin_attrs,
};
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index ef76e5eecf0b..d5de6ee8466d 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <asm/dmi.h>
#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider
the top entry type is only 8 bits */
@@ -380,7 +381,7 @@ static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
u8 __iomem *mapped;
ssize_t wrote = 0;
- mapped = ioremap(sel->access_method_address, sel->area_length);
+ mapped = dmi_remap(sel->access_method_address, sel->area_length);
if (!mapped)
return -EIO;
@@ -390,7 +391,7 @@ static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
wrote++;
}
- iounmap(mapped);
+ dmi_unmap(mapped);
return wrote;
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 394db40ed374..2b4c39fdfa91 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -151,6 +151,16 @@ config APPLE_PROPERTIES
If unsure, say Y if you have a Mac. Otherwise N.
+config RESET_ATTACK_MITIGATION
+ bool "Reset memory attack mitigation"
+ depends on EFI_STUB
+ help
+ Request that the firmware clear the contents of RAM after a reboot
+ using the TCG Platform Reset Attack Mitigation specification. This
+ protects against an attacker forcibly rebooting the system while it
+ still contains secrets in RAM, booting another OS and extracting the
+ secrets.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index c473f4c5ca34..9f6bcf173b0e 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -18,8 +18,8 @@
#define pr_fmt(fmt) "apple-properties: " fmt
#include <linux/bootmem.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
@@ -191,8 +191,7 @@ static int __init map_properties(void)
u64 pa_data;
int ret;
- if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
- !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ if (!x86_apple_machine)
return 0;
pa_data = boot_params.hdr.setup_data;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1027d7b44358..80d1a885def5 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -145,6 +145,9 @@ static int __init uefi_init(void)
sizeof(efi_config_table_t),
arch_tables);
+ if (!retval)
+ efi.config_table = (unsigned long)efi.systab->tables;
+
early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
@@ -159,6 +162,7 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
@@ -211,6 +215,10 @@ static __init void reserve_regions(void)
if (!is_usable_memory(md))
memblock_mark_nomap(paddr, size);
+
+ /* keep ACPI reclaim memory intact for kexec etc. */
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+ memblock_reserve(paddr, size);
}
}
}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 48a8f69da42a..d2fcafcea07e 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -534,7 +534,7 @@ static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
{
- uuid_le *sec_type = (uuid_le *)gdata->section_type;
+ guid_t *sec_type = (guid_t *)gdata->section_type;
__u16 severity;
char newpfx[64];
@@ -545,12 +545,12 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""Error %d, type: %s\n", pfx, sec_no,
cper_severity_str(severity));
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
- printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
+ printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
- if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
+ if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: general processor error\n", newpfx);
@@ -558,7 +558,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
cper_print_proc_generic(newpfx, proc_err);
else
goto err_section_too_small;
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: memory error\n", newpfx);
@@ -568,7 +568,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
gdata->error_data_length);
else
goto err_section_too_small;
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
printk("%s""section_type: PCIe error\n", newpfx);
@@ -606,7 +606,6 @@ void cper_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
- unsigned int data_len;
int sec_no = 0;
char newpfx[64];
__u16 severity;
@@ -617,14 +616,10 @@ void cper_estatus_print(const char *pfx,
"It has been corrected by h/w "
"and requires no further action");
printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
- data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
- while (data_len >= acpi_hest_get_size(gdata)) {
+ apei_estatus_for_each_section(estatus, gdata) {
cper_estatus_print_section(newpfx, gdata, sec_no);
- data_len -= acpi_hest_get_record_size(gdata);
- gdata = acpi_hest_get_next(gdata);
sec_no++;
}
}
@@ -653,15 +648,12 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
- while (data_len >= acpi_hest_get_size(gdata)) {
+ apei_estatus_for_each_section(estatus, gdata) {
gedata_len = acpi_hest_get_error_length(gdata);
if (gedata_len > data_len - acpi_hest_get_size(gdata))
return -EINVAL;
-
data_len -= acpi_hest_get_record_size(gdata);
- gdata = acpi_hest_get_next(gdata);
}
if (data_len)
return -EINVAL;
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b58233e4ed71..50793fda7819 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,26 +27,6 @@ struct bmp_header {
u32 size;
} __packed;
-static bool efi_bgrt_addr_valid(u64 addr)
-{
- efi_memory_desc_t *md;
-
- for_each_efi_memory_desc(md) {
- u64 size;
- u64 end;
-
- if (md->type != EFI_BOOT_SERVICES_DATA)
- continue;
-
- size = md->num_pages << EFI_PAGE_SHIFT;
- end = md->phys_addr + size;
- if (addr >= md->phys_addr && addr < end)
- return true;
- }
-
- return false;
-}
-
void __init efi_bgrt_init(struct acpi_table_header *table)
{
void *image;
@@ -85,7 +65,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
goto out;
}
- if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+ if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) {
pr_notice("Ignoring BGRT: invalid image address\n");
goto out;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 045d6d311bde..f70febf680c3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -55,6 +55,25 @@ struct efi __read_mostly efi = {
};
EXPORT_SYMBOL(efi);
+static unsigned long *efi_tables[] = {
+ &efi.mps,
+ &efi.acpi,
+ &efi.acpi20,
+ &efi.smbios,
+ &efi.smbios3,
+ &efi.sal_systab,
+ &efi.boot_info,
+ &efi.hcdp,
+ &efi.uga,
+ &efi.uv_systab,
+ &efi.fw_vendor,
+ &efi.runtime,
+ &efi.config_table,
+ &efi.esrt,
+ &efi.properties_table,
+ &efi.mem_attr_table,
+};
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -179,7 +198,7 @@ static umode_t efi_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group efi_subsys_attr_group = {
+static const struct attribute_group efi_subsys_attr_group = {
.attrs = efi_subsys_attrs,
.is_visible = efi_attr_is_visible,
};
@@ -522,6 +541,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (seed != NULL) {
add_device_randomness(seed->bits, seed->size);
early_memunmap(seed, sizeof(*seed) + size);
+ pr_notice("seeding entropy pool\n");
} else {
pr_err("Could not map UEFI random seed!\n");
}
@@ -791,19 +811,19 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
}
/*
+ * IA64 has a funky EFI memory map that doesn't work the same way as
+ * other architectures.
+ */
+#ifndef CONFIG_IA64
+/*
* efi_mem_attributes - lookup memmap attributes for physical address
* @phys_addr: the physical address to lookup
*
* Search in the EFI memory map for the region covering
* @phys_addr. Returns the EFI memory attributes if the region
* was found in the memory map, 0 otherwise.
- *
- * Despite being marked __weak, most architectures should *not*
- * override this function. It is __weak solely for the benefit
- * of ia64 which has a funky EFI memory map that doesn't work
- * the same way as other architectures.
*/
-u64 __weak efi_mem_attributes(unsigned long phys_addr)
+u64 efi_mem_attributes(unsigned long phys_addr)
{
efi_memory_desc_t *md;
@@ -819,6 +839,31 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr)
return 0;
}
+/*
+ * efi_mem_type - lookup memmap type for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering @phys_addr.
+ * Returns the EFI memory type if the region was found in the memory
+ * map, a negative error number otherwise.
+ */
+int efi_mem_type(unsigned long phys_addr)
+{
+ const efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return -ENOTSUPP;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->type;
+ }
+ return -EINVAL;
+}
+#endif
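As a usage sketch, callers can now validate a firmware-supplied address against the memory map in one comparison, which is exactly what the efi-bgrt.c change earlier in this diff relies on; the variable name here is illustrative:

        /* Reject a firmware-provided address that is not boot services data. */
        if (efi_mem_type(image_addr) != EFI_BOOT_SERVICES_DATA)
                return -EINVAL;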
+
int efi_status_to_err(efi_status_t status)
{
int err;
@@ -855,6 +900,20 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+bool efi_is_table_address(unsigned long phys_addr)
+{
+ unsigned int i;
+
+ if (phys_addr == EFI_INVALID_TABLE_ADDR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
+ if (*(efi_tables[i]) == phys_addr)
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
unsigned long code, void *unused)
@@ -867,7 +926,7 @@ static int update_efi_random_seed(struct notifier_block *nb,
seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
if (seed != NULL) {
- size = min(seed->size, 32U);
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
memunmap(seed);
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 8554d7aec31c..bd7ed3c1148a 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -230,7 +230,7 @@ static umode_t esrt_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group esrt_attr_group = {
+static const struct attribute_group esrt_attr_group = {
.attrs = esrt_attrs,
.is_visible = esrt_attr_is_visible,
};
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 37e24f525162..dedf9bde44db 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
@@ -30,6 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o
+lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 8181ac179d14..1cb2d1c070c3 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -192,6 +192,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation(sys_table);
+
secure_boot = efi_get_secureboot(sys_table);
/*
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index b4c2589d7c91..b9bd827caa22 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -9,9 +9,18 @@
* published by the Free Software Foundation.
*
*/
+
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to the section markers, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
+#include <asm/sections.h>
+#pragma GCC visibility pop
+
#include <linux/efi.h>
#include <asm/efi.h>
-#include <asm/sections.h>
+#include <asm/memory.h>
#include <asm/sysreg.h>
#include "efistub.h"
@@ -81,9 +90,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
/*
* If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
* displacement in the interval [0, MIN_KIMG_ALIGN) that
- * is a multiple of the minimal segment alignment (SZ_64K)
+ * doesn't violate this kernel's de-facto alignment
+ * constraints.
*/
- u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
+ u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
(phys_seed >> 32) & mask : TEXT_OFFSET;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index b0184360efc6..50a9cab5a834 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -205,7 +205,7 @@ again:
unsigned long m = (unsigned long)map;
u64 start, end;
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
@@ -298,7 +298,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long m = (unsigned long)map;
u64 start, end;
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 7e72954d5860..e0e603a89aa9 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -145,8 +145,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
return status;
}
-#define RANDOM_SEED_SIZE 32
-
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
@@ -162,25 +160,25 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
return status;
status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*seed) + RANDOM_SEED_SIZE,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
(void **)&seed);
if (status != EFI_SUCCESS)
return status;
- status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE,
+ status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE,
seed->bits);
if (status == EFI_UNSUPPORTED)
/*
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE,
+ status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE,
seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
- seed->size = RANDOM_SEED_SIZE;
+ seed->size = EFI_RANDOM_SEED_SIZE;
status = efi_call_early(install_configuration_table, &rng_table_guid,
seed);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
new file mode 100644
index 000000000000..6224cdbc9669
--- /dev/null
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -0,0 +1,58 @@
+/*
+ * TPM handling.
+ *
+ * Copyright (C) 2016 CoreOS, Inc
+ * Copyright (C) 2017 Google, Inc.
+ * Matthew Garrett <mjg59@google.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
+ 'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
+ 'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
+ 'l', 0
+};
+
+#define MEMORY_ONLY_RESET_CONTROL_GUID \
+ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
+
+#define get_efi_var(name, vendor, ...) \
+ efi_call_runtime(get_variable, \
+ (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+ __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_call_runtime(set_variable, \
+ (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+ __VA_ARGS__)
+
+/*
+ * Enable reboot attack mitigation. This requests that the firmware clear the
+ * RAM on next reboot before proceeding with boot, ensuring that any secrets
+ * are cleared. If userland has ensured that all secrets have been removed
+ * from RAM before reboot it can simply reset this variable.
+ */
+void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+{
+ u8 val = 1;
+ efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
+ efi_status_t status;
+ unsigned long datasize = 0;
+
+ status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ NULL, &datasize, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ return;
+
+ set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
+}
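For completeness, a hedged userspace sketch of the "reset this variable" step mentioned in the comment above: once userland has wiped its secrets, writing 0 to the MOR variable through efivarfs asks the firmware to skip the RAM clear on the next boot. The efivarfs path and the 4-byte attribute header are the usual efivarfs conventions, but treat this as an untested illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* efivarfs expects a 4-byte attribute word followed by the payload. */
        struct {
                uint32_t attrs;
                uint8_t val;
        } __attribute__((packed)) buf = {
                .attrs = 0x7,   /* NV | BOOTSERVICE | RUNTIME access */
                .val = 0,       /* secrets are gone, skip the RAM clear */
        };
        FILE *f = fopen("/sys/firmware/efi/efivars/"
                        "MemoryOverwriteRequestControl-"
                        "e20939be-32d4-41be-a150-897f85d49829", "wb");

        if (!f)
                return 1;
        fwrite(&buf, sizeof(buf), 1, f);
        return fclose(f) ? 1 : 0;
}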
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 62ead9b9d871..22874544d301 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -5,6 +5,8 @@
#include <linux/efi.h>
#include <linux/reboot.h>
+static void (*orig_pm_power_off)(void);
+
int efi_reboot_quirk_mode = -1;
void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
@@ -51,6 +53,12 @@ bool __weak efi_poweroff_required(void)
static void efi_power_off(void)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+ /*
+ * The above call should not return, if it does fall back to
+ * the original power off method (typically ACPI poweroff).
+ */
+ if (orig_pm_power_off)
+ orig_pm_power_off();
}
static int __init efi_shutdown_init(void)
@@ -58,8 +66,10 @@ static int __init efi_shutdown_init(void)
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return -ENODEV;
- if (efi_poweroff_required())
+ if (efi_poweroff_required()) {
+ orig_pm_power_off = pm_power_off;
pm_power_off = efi_power_off;
+ }
return 0;
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c46387160976..c8f169bf2e27 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
return local_hash_64(input, 32);
}
-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
{
.ident = "Google Board",
.matches = {
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
index 8c1bf6dbdaa6..19bcbd10855b 100644
--- a/drivers/firmware/google/memconsole-x86-legacy.c
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -126,7 +126,7 @@ static bool memconsole_ebda_init(void)
return false;
}
-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
{
.ident = "Google Board",
.matches = {
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 78945729388e..35e553b3b190 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -202,7 +202,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
- goto err_iounmap;
+ goto err_memunmap;
}
sysfs_bin_attr_init(&sec->bin_attr);
@@ -233,8 +233,8 @@ err_sysfs_remove:
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
err_free_raw_name:
kfree(sec->raw_name);
-err_iounmap:
- iounmap(sec->baseaddr);
+err_memunmap:
+ memunmap(sec->baseaddr);
return err;
}
@@ -245,7 +245,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
kobject_put(sec->kobj);
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
- iounmap(sec->baseaddr);
+ memunmap(sec->baseaddr);
}
return 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
return -ENOMEM;
memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
- iounmap(temp);
+ memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
return -ENODEV;
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 75273a251603..e83d6aec0c13 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- pcdp = early_ioremap(efi.hcdp, 4096);
+ pcdp = early_memremap(efi.hcdp, 4096);
printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
}
out:
- early_iounmap(pcdp, 4096);
+ early_memunmap(pcdp, 4096);
return rc;
}
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 493a56a4cfc4..d687ca3d5049 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -280,8 +280,8 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
"arm,psci-suspend-param",
&state);
if (ret) {
- pr_warn(" * %s missing arm,psci-suspend-param property\n",
- state_node->full_name);
+ pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
+ state_node);
of_node_put(state_node);
goto free_mem;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index b25179517cc5..73ca55b7b7ec 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -806,6 +806,8 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "firmware: %s\n", tag);
+ platform_set_drvdata(pdev, bpmp);
+
err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
if (err < 0)
goto free_mrq;
@@ -822,8 +824,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
- platform_set_drvdata(pdev, bpmp);
-
return 0;
free_mrq:
diff --git a/drivers/fmc/Makefile b/drivers/fmc/Makefile
index b9452919739f..e809322e1bac 100644
--- a/drivers/fmc/Makefile
+++ b/drivers/fmc/Makefile
@@ -6,6 +6,7 @@ fmc-y += fmc-match.o
fmc-y += fmc-sdb.o
fmc-y += fru-parse.o
fmc-y += fmc-dump.o
+fmc-y += fmc-debug.o
obj-$(CONFIG_FMC_FAKEDEV) += fmc-fakedev.o
obj-$(CONFIG_FMC_TRIVIAL) += fmc-trivial.o
diff --git a/drivers/fmc/fmc-chardev.c b/drivers/fmc/fmc-chardev.c
index ace6ef24d15e..5ecf4090a610 100644
--- a/drivers/fmc/fmc-chardev.c
+++ b/drivers/fmc/fmc-chardev.c
@@ -129,8 +129,7 @@ static int fc_probe(struct fmc_device *fmc)
struct fc_instance *fc;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fc_drv);
+ index = fmc_validate(fmc, &fc_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 353fc546fb08..cec3b8db0d69 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -13,6 +13,9 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fmc.h>
+#include <linux/fmc-sdb.h>
+
+#include "fmc-private.h"
static int fmc_check_version(unsigned long version, const char *name)
{
@@ -118,6 +121,61 @@ static struct bin_attribute fmc_eeprom_attr = {
.write = fmc_write_eeprom,
};
+int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags)
+{
+ if (fmc->op->irq_request)
+ return fmc->op->irq_request(fmc, h, name, flags);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_irq_request);
+
+void fmc_irq_free(struct fmc_device *fmc)
+{
+ if (fmc->op->irq_free)
+ fmc->op->irq_free(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_free);
+
+void fmc_irq_ack(struct fmc_device *fmc)
+{
+ if (likely(fmc->op->irq_ack))
+ fmc->op->irq_ack(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_ack);
+
+int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv)
+{
+ if (fmc->op->validate)
+ return fmc->op->validate(fmc, drv);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_validate);
+
+int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, int ngpio)
+{
+ if (fmc->op->gpio_config)
+ return fmc->op->gpio_config(fmc, gpio, ngpio);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_gpio_config);
+
+int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l)
+{
+ if (fmc->op->read_ee)
+ return fmc->op->read_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_read_ee);
+
+int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l)
+{
+ if (fmc->op->write_ee)
+ return fmc->op->write_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_write_ee);
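With these wrappers, mezzanine drivers no longer need to null-check fmc->op themselves; a probe fragment in the style of the fmc-trivial conversion further down, where my_drv and my_handler are placeholders:

        int index, ret;

        index = fmc_validate(fmc, &my_drv);     /* -EPERM if unsupported */
        if (index < 0)
                return -EINVAL;

        ret = fmc_irq_request(fmc, my_handler, "my-mezzanine", IRQF_SHARED);
        if (ret < 0)
                return ret;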
+
/*
* Functions for client modules follow
*/
@@ -141,7 +199,8 @@ EXPORT_SYMBOL(fmc_driver_unregister);
* When a device set is registered, all eeproms must be read
* and all FRUs must be parsed
*/
-int fmc_device_register_n(struct fmc_device **devs, int n)
+int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw)
{
struct fmc_device *fmc, **devarray;
uint32_t device_id;
@@ -221,6 +280,21 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
else
dev_set_name(&fmc->dev, "%s-%04x", fmc->mezzanine_name,
device_id);
+
+ if (gw) {
+ /*
+ * The carrier already know the bitstream to load
+ * for this set of FMC mezzanines.
+ */
+ ret = fmc->op->reprogram_raw(fmc, NULL,
+ gw->bitstream, gw->len);
+ if (ret) {
+ dev_warn(fmc->hwdev,
+ "Invalid gateware for FMC mezzanine\n");
+ goto out;
+ }
+ }
+
ret = device_add(&fmc->dev);
if (ret < 0) {
dev_err(fmc->hwdev, "Slot %i: Failed in registering "
@@ -234,18 +308,16 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
}
/* This device went well, give information to the user */
fmc_dump_eeprom(fmc);
- fmc_dump_sdb(fmc);
+ fmc_debug_init(fmc);
}
return 0;
out1:
device_del(&fmc->dev);
out:
- fmc_free_id_info(fmc);
- put_device(&fmc->dev);
-
kfree(devarray);
for (i--; i >= 0; i--) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
@@ -254,8 +326,20 @@ out:
return ret;
}
+EXPORT_SYMBOL(fmc_device_register_n_gw);
+
+int fmc_device_register_n(struct fmc_device **devs, int n)
+{
+ return fmc_device_register_n_gw(devs, n, NULL);
+}
EXPORT_SYMBOL(fmc_device_register_n);
+int fmc_device_register_gw(struct fmc_device *fmc, struct fmc_gateware *gw)
+{
+ return fmc_device_register_n_gw(&fmc, 1, gw);
+}
+EXPORT_SYMBOL(fmc_device_register_gw);
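A carrier that already knows which bitstream matches its mezzanines can hand it over at registration time; a sketch assuming struct fmc_gateware carries a bitstream pointer and length, and that fw is a firmware blob the carrier obtained elsewhere:

        struct fmc_gateware gw = {
                .bitstream = (void *)fw->data,  /* assumed firmware image */
                .len = fw->size,
        };

        /* Loads the gateware via op->reprogram_raw before device_add(). */
        ret = fmc_device_register_gw(fmc, &gw);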
+
int fmc_device_register(struct fmc_device *fmc)
{
return fmc_device_register_n(&fmc, 1);
@@ -273,6 +357,7 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
kfree(devs[0]->devarray);
for (i = 0; i < n; i++) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
diff --git a/drivers/fmc/fmc-debug.c b/drivers/fmc/fmc-debug.c
new file mode 100644
index 000000000000..32930722770c
--- /dev/null
+++ b/drivers/fmc/fmc-debug.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/byteorder.h>
+
+#include <linux/fmc.h>
+#include <linux/sdb.h>
+#include <linux/fmc-sdb.h>
+
+#define FMC_DBG_SDB_DUMP "dump_sdb"
+
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+ int i = len - 1;
+
+ memcpy(buf, str, len);
+ buf[len] = '\0';
+ while (i >= 0 && buf[i] == ' ')
+ buf[i--] = '\0';
+ return buf;
+}
+
+#define __sdb_string(buf, field) ({ \
+ BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
+ __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
+ })
+
+/*
+ * We do not check seq_printf() errors because we want to see things in any case
+ */
+static void fmc_sdb_dump_recursive(struct fmc_device *fmc, struct seq_file *s,
+ const struct sdb_array *arr)
+{
+ unsigned long base = arr->baseaddr;
+ int i, j, n = arr->len, level = arr->level;
+ char tmp[64];
+
+ for (i = 0; i < n; i++) {
+ union sdb_record *r;
+ struct sdb_product *p;
+ struct sdb_component *c;
+
+ r = &arr->record[i];
+ c = &r->dev.sdb_component;
+ p = &c->product;
+
+ for (j = 0; j < level; j++)
+ seq_printf(s, " ");
+ switch (r->empty.record_type) {
+ case sdb_type_interconnect:
+ seq_printf(s, "%08llx:%08x %.19s\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name);
+ break;
+ case sdb_type_device:
+ seq_printf(s, "%08llx:%08x %.19s (%08llx-%08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base,
+ __be64_to_cpu(c->addr_last) + base);
+ break;
+ case sdb_type_bridge:
+ seq_printf(s, "%08llx:%08x %.19s (bridge: %08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base);
+ if (IS_ERR(arr->subtree[i])) {
+ seq_printf(s, "SDB: (bridge error %li)\n",
+ PTR_ERR(arr->subtree[i]));
+ break;
+ }
+ fmc_sdb_dump_recursive(fmc, s, arr->subtree[i]);
+ break;
+ case sdb_type_integration:
+ seq_printf(s, "integration\n");
+ break;
+ case sdb_type_repo_url:
+ seq_printf(s, "Synthesis repository: %s\n",
+ __sdb_string(tmp, r->repo_url.repo_url));
+ break;
+ case sdb_type_synthesis:
+ seq_printf(s, "Bitstream '%s' ",
+ __sdb_string(tmp, r->synthesis.syn_name));
+ seq_printf(s, "synthesized %08x by %s ",
+ __be32_to_cpu(r->synthesis.date),
+ __sdb_string(tmp, r->synthesis.user_name));
+ seq_printf(s, "(%s version %x), ",
+ __sdb_string(tmp, r->synthesis.tool_name),
+ __be32_to_cpu(r->synthesis.tool_version));
+ seq_printf(s, "commit %pm\n",
+ r->synthesis.commit_id);
+ break;
+ case sdb_type_empty:
+ seq_printf(s, "empty\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN TYPE 0x%02x\n",
+ r->empty.record_type);
+ break;
+ }
+ }
+}
+
+static int fmc_sdb_dump(struct seq_file *s, void *offset)
+{
+ struct fmc_device *fmc = s->private;
+
+ if (!fmc->sdb) {
+ seq_printf(s, "no SDB information\n");
+ return 0;
+ }
+
+ seq_printf(s, "FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
+ fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
+ /* Dump SDB information */
+ fmc_sdb_dump_recursive(fmc, s, fmc->sdb);
+
+ return 0;
+}
+
+
+static int fmc_sdb_dump_open(struct inode *inode, struct file *file)
+{
+ struct fmc_device *fmc = inode->i_private;
+
+ return single_open(file, fmc_sdb_dump, fmc);
+}
+
+
+const struct file_operations fmc_dbgfs_sdb_dump = {
+ .owner = THIS_MODULE,
+ .open = fmc_sdb_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int fmc_debug_init(struct fmc_device *fmc)
+{
+ fmc->dbg_dir = debugfs_create_dir(dev_name(&fmc->dev), NULL);
+ if (IS_ERR_OR_NULL(fmc->dbg_dir)) {
+ pr_err("FMC: Cannot create debugfs\n");
+ return PTR_ERR(fmc->dbg_dir);
+ }
+
+ fmc->dbg_sdb_dump = debugfs_create_file(FMC_DBG_SDB_DUMP, 0444,
+ fmc->dbg_dir, fmc,
+ &fmc_dbgfs_sdb_dump);
+ if (IS_ERR_OR_NULL(fmc->dbg_sdb_dump))
+ pr_err("FMC: Cannot create debugfs file %s\n",
+ FMC_DBG_SDB_DUMP);
+
+ return 0;
+}
+
+void fmc_debug_exit(struct fmc_device *fmc)
+{
+ if (fmc->dbg_dir)
+ debugfs_remove_recursive(fmc->dbg_dir);
+}
diff --git a/drivers/fmc/fmc-dump.c b/drivers/fmc/fmc-dump.c
index c91afd6388f6..cd1df475b254 100644
--- a/drivers/fmc/fmc-dump.c
+++ b/drivers/fmc/fmc-dump.c
@@ -15,8 +15,6 @@
static int fmc_must_dump_eeprom;
module_param_named(dump_eeprom, fmc_must_dump_eeprom, int, 0644);
-static int fmc_must_dump_sdb;
-module_param_named(dump_sdb, fmc_must_dump_sdb, int, 0644);
#define LINELEN 16
@@ -59,42 +57,3 @@ void fmc_dump_eeprom(const struct fmc_device *fmc)
for (i = 0; i < fmc->eeprom_len; i += LINELEN, line += LINELEN)
prev = dump_line(i, line, prev);
}
-
-void fmc_dump_sdb(const struct fmc_device *fmc)
-{
- const uint8_t *line, *prev;
- int i, len;
-
- if (!fmc->sdb)
- return;
- if (!fmc_must_dump_sdb)
- return;
-
- /* If the argument is not-zero, do simple dump (== show) */
- if (fmc_must_dump_sdb > 0)
- fmc_show_sdb_tree(fmc);
-
- if (fmc_must_dump_sdb == 1)
- return;
-
- /* If bigger than 1, dump it seriously, to help debugging */
-
- /*
- * Here we should really use libsdbfs (which is designed to
- * work in kernel space as well) , but it doesn't support
- * directories yet, and it requires better intergration (it
- * should be used instead of fmc-specific code).
- *
- * So, lazily, just dump the top-level array
- */
- pr_info("FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
- fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
- pr_info("FMC: poor dump of sdb first level:\n");
-
- len = fmc->sdb->len * sizeof(union sdb_record);
- line = (void *)fmc->sdb->record;
- prev = NULL;
- for (i = 0; i < len; i += LINELEN, line += LINELEN)
- prev = dump_line(i, line, prev);
- return;
-}
diff --git a/drivers/fmc/fmc-match.c b/drivers/fmc/fmc-match.c
index 104a5efc2207..a0956d1f7550 100644
--- a/drivers/fmc/fmc-match.c
+++ b/drivers/fmc/fmc-match.c
@@ -63,7 +63,7 @@ int fmc_fill_id_info(struct fmc_device *fmc)
if (!fmc->eeprom)
return -ENOMEM;
allocated = 1;
- ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
+ ret = fmc_read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
if (ret < 0)
goto out;
}
diff --git a/drivers/fmc/fmc-private.h b/drivers/fmc/fmc-private.h
new file mode 100644
index 000000000000..1e5136643bdc
--- /dev/null
+++ b/drivers/fmc/fmc-private.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+extern int fmc_debug_init(struct fmc_device *fmc);
+extern void fmc_debug_exit(struct fmc_device *fmc);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 4603fdb74465..ffdc1762b580 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -127,12 +127,12 @@ int fmc_free_sdb_tree(struct fmc_device *fmc)
EXPORT_SYMBOL(fmc_free_sdb_tree);
/* This helper calls reprogram and initializes the SDB as well */
-int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
- int sdb_entry)
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry)
{
int ret;
- ret = fmc->op->reprogram(fmc, d, gw);
+ ret = fmc->op->reprogram_raw(fmc, d, gw, len);
if (ret < 0)
return ret;
if (sdb_entry < 0)
@@ -145,108 +145,39 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
sdb_entry);
return -ENODEV;
}
- fmc_dump_sdb(fmc);
- return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram);
-
-static char *__strip_trailing_space(char *buf, char *str, int len)
-{
- int i = len - 1;
- memcpy(buf, str, len);
- while(i >= 0 && buf[i] == ' ')
- buf[i--] = '\0';
- return buf;
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram_raw);
-#define __sdb_string(buf, field) ({ \
- BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
- __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
- })
-
-static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
- const struct sdb_array *arr)
+/* This helper calls reprogram and initializes the SDB as well */
+int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
+ int sdb_entry)
{
- unsigned long base = arr->baseaddr;
- int i, j, n = arr->len, level = arr->level;
- char buf[64];
-
- for (i = 0; i < n; i++) {
- union sdb_record *r;
- struct sdb_product *p;
- struct sdb_component *c;
- r = &arr->record[i];
- c = &r->dev.sdb_component;
- p = &c->product;
+ int ret;
- dev_info(&fmc->dev, "SDB: ");
+ ret = fmc->op->reprogram(fmc, d, gw);
+ if (ret < 0)
+ return ret;
+ if (sdb_entry < 0)
+ return ret;
- for (j = 0; j < level; j++)
- printk(KERN_CONT " ");
- switch (r->empty.record_type) {
- case sdb_type_interconnect:
- printk(KERN_CONT "%08llx:%08x %.19s\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name);
- break;
- case sdb_type_device:
- printk(KERN_CONT "%08llx:%08x %.19s (%08llx-%08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base,
- __be64_to_cpu(c->addr_last) + base);
- break;
- case sdb_type_bridge:
- printk(KERN_CONT "%08llx:%08x %.19s (bridge: %08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base);
- if (IS_ERR(arr->subtree[i])) {
- dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
- break;
- }
- __fmc_show_sdb_tree(fmc, arr->subtree[i]);
- break;
- case sdb_type_integration:
- printk(KERN_CONT "integration\n");
- break;
- case sdb_type_repo_url:
- printk(KERN_CONT "Synthesis repository: %s\n",
- __sdb_string(buf, r->repo_url.repo_url));
- break;
- case sdb_type_synthesis:
- printk(KERN_CONT "Bitstream '%s' ",
- __sdb_string(buf, r->synthesis.syn_name));
- printk(KERN_CONT "synthesized %08x by %s ",
- __be32_to_cpu(r->synthesis.date),
- __sdb_string(buf, r->synthesis.user_name));
- printk(KERN_CONT "(%s version %x), ",
- __sdb_string(buf, r->synthesis.tool_name),
- __be32_to_cpu(r->synthesis.tool_version));
- printk(KERN_CONT "commit %pm\n",
- r->synthesis.commit_id);
- break;
- case sdb_type_empty:
- printk(KERN_CONT "empty\n");
- break;
- default:
- printk(KERN_CONT "UNKNOWN TYPE 0x%02x\n",
- r->empty.record_type);
- break;
- }
+ /* We are required to find SDB at a given offset */
+ ret = fmc_scan_sdb_tree(fmc, sdb_entry);
+ if (ret < 0) {
+ dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
+ sdb_entry);
+ return -ENODEV;
}
+
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram);
void fmc_show_sdb_tree(const struct fmc_device *fmc)
{
- if (!fmc->sdb)
- return;
- __fmc_show_sdb_tree(fmc, fmc->sdb);
+ pr_err("%s: not supported anymore, use debugfs to dump SDB\n",
+ __func__);
}
EXPORT_SYMBOL(fmc_show_sdb_tree);
diff --git a/drivers/fmc/fmc-trivial.c b/drivers/fmc/fmc-trivial.c
index 6c590f54c79d..8defdee3e3a3 100644
--- a/drivers/fmc/fmc-trivial.c
+++ b/drivers/fmc/fmc-trivial.c
@@ -24,7 +24,7 @@ static irqreturn_t t_handler(int irq, void *dev_id)
{
struct fmc_device *fmc = dev_id;
- fmc->op->irq_ack(fmc);
+ fmc_irq_ack(fmc);
dev_info(&fmc->dev, "received irq %i\n", irq);
return IRQ_HANDLED;
}
@@ -46,25 +46,21 @@ static int t_probe(struct fmc_device *fmc)
int ret;
int index = 0;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &t_drv);
+ index = fmc_validate(fmc, &t_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
- ret = fmc->op->irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
+ ret = fmc_irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
if (ret < 0)
return ret;
/* ignore error code of call below, we really don't care */
- fmc->op->gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
+ fmc_gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
- /* Reprogram, if asked to. ESRCH == no filename specified */
- ret = -ESRCH;
- if (fmc->op->reprogram)
- ret = fmc->op->reprogram(fmc, &t_drv, "");
- if (ret == -ESRCH)
+ ret = fmc_reprogram(fmc, &t_drv, "", 0);
+ if (ret == -EPERM) /* programming not supported */
ret = 0;
if (ret < 0)
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
/* FIXME: reprogram LM32 too */
return ret;
@@ -72,7 +68,7 @@ static int t_probe(struct fmc_device *fmc)
static int t_remove(struct fmc_device *fmc)
{
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
return 0;
}
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index 9bb2cbd5c9f2..3eb81bb1f1fc 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -50,7 +50,7 @@ static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
if (write) {
dev_info(&fmc->dev, "write %i bytes at 0x%04x\n",
thislen, thisaddr);
- err = fmc->op->write_ee(fmc, thisaddr, p + 5, thislen);
+ err = fmc_write_ee(fmc, thisaddr, p + 5, thislen);
}
if (err < 0) {
dev_err(&fmc->dev, "write failure @0x%04x\n",
@@ -70,7 +70,7 @@ static int fwe_run_bin(struct fmc_device *fmc, const struct firmware *fw)
int ret;
dev_info(&fmc->dev, "programming %zi bytes\n", fw->size);
- ret = fmc->op->write_ee(fmc, 0, (void *)fw->data, fw->size);
+ ret = fmc_write_ee(fmc, 0, (void *)fw->data, fw->size);
if (ret < 0) {
dev_info(&fmc->dev, "write_eeprom: error %i\n", ret);
return ret;
@@ -115,8 +115,8 @@ static int fwe_probe(struct fmc_device *fmc)
KBUILD_MODNAME);
return -ENODEV;
}
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fwe_drv);
+
+ index = fmc_validate(fmc, &fwe_drv);
if (index < 0) {
pr_err("%s: refusing device \"%s\"\n", KBUILD_MODNAME,
dev_name(dev));
diff --git a/drivers/fmc/fru-parse.c b/drivers/fmc/fru-parse.c
index cb46263c5da2..eb21480d399f 100644
--- a/drivers/fmc/fru-parse.c
+++ b/drivers/fmc/fru-parse.c
@@ -31,12 +31,11 @@ static char *__fru_alloc_get_tl(struct fru_common_header *header, int nr)
{
struct fru_type_length *tl;
char *res;
- int len;
tl = __fru_get_board_tl(header, nr);
if (!tl)
return NULL;
- len = fru_strlen(tl);
+
res = fru_alloc(fru_strlen(tl) + 1);
if (!res)
return NULL;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 161ba9dccede..ad5448f718b3 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -2,9 +2,7 @@
# FPGA framework configuration
#
-menu "FPGA Configuration Support"
-
-config FPGA
+menuconfig FPGA
tristate "FPGA Configuration Framework"
help
Say Y here if you want support for configuring FPGAs from the
@@ -26,6 +24,20 @@ config FPGA_MGR_ICE40_SPI
help
FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+config FPGA_MGR_ALTERA_CVP
+ tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ depends on PCI
+ help
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+ and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+
+config FPGA_MGR_ALTERA_PS_SPI
+ tristate "Altera FPGA Passive Serial over SPI"
+ depends on SPI
+ help
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix
+ using the passive serial interface over SPI.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -106,5 +118,3 @@ config XILINX_PR_DECOUPLER
being reprogrammed during partial reconfig.
endif # FPGA
-
-endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 2a4f0218145c..e09895f0525b 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o
+obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI) += altera-ps-spi.o
obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
new file mode 100644
index 000000000000..08629ee69d11
--- /dev/null
+++ b/drivers/fpga/altera-cvp.c
@@ -0,0 +1,500 @@
+/*
+ * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Manage Altera FPGA firmware using PCIe CvP.
+ * Firmware must be in binary "rbf" format.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#define CVP_BAR 0 /* BAR used for data transfer in memory mode */
+#define CVP_DUMMY_WR 244 /* dummy writes to clear CvP state machine */
+#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
+
+/* Vendor Specific Extended Capability Registers */
+#define VSE_PCIE_EXT_CAP_ID 0x200
+#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
+
+#define VSE_CVP_STATUS 0x21c /* 32bit */
+#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
+#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
+#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
+#define VSE_CVP_STATUS_USERMODE BIT(21) /* USERMODE */
+#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
+#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
+
+#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */
+#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
+#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
+
+#define VSE_CVP_DATA 0x228 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */
+#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
+#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+
+#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */
+#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+
+#define DRV_NAME "altera-cvp"
+#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+
+/* Optional CvP config error status check for debugging */
+static bool altera_cvp_chkcfg;
+
+struct altera_cvp_conf {
+ struct fpga_manager *mgr;
+ struct pci_dev *pci_dev;
+ void __iomem *map;
+ void (*write_data)(struct altera_cvp_conf *, u32);
+ char mgr_name[64];
+ u8 numclks;
+};
+
+static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 status;
+
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+
+ if (status & VSE_CVP_STATUS_CFG_DONE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (status & VSE_CVP_STATUS_CVP_EN)
+ return FPGA_MGR_STATE_POWER_UP;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
+{
+ writel(val, conf->map);
+}
+
+static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
+{
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+}
+
+/* switches between CvP clock and internal clock */
+static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
+{
+ unsigned int i;
+ u32 val;
+
+ /* set 1 CVP clock cycle for every CVP Data Register Write */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+
+ for (i = 0; i < CVP_DUMMY_WR; i++)
+ conf->write_data(conf, 0); /* dummy data, could be any value */
+}
+
+static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
+ u32 status_val, int timeout_us)
+{
+ unsigned int retries;
+ u32 val;
+
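+	/* poll in 10 us steps; i.e. retries = DIV_ROUND_UP(timeout_us, 10), rounding the budget up */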
+ retries = timeout_us / 10;
+ if (timeout_us % 10)
+ retries++;
+
+ do {
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if ((val & status_mask) == status_val)
+ return 0;
+
+ /* use small usleep value to re-check and break early */
+ usleep_range(10, 11);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_teardown(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 val;
+
+ /* STEP 12 - reset START_XFER bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val &= ~VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 13 - reset CVP_CONFIG bit */
+ val &= ~VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /*
+ * STEP 14
+ * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
+ * writes to the HIP
+ */
+ altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+
+ /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+ if (ret)
+ dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
+
+ return ret;
+}
+
+static int altera_cvp_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ u32 iflags, val;
+ int ret;
+
+ iflags = info ? info->flags : 0;
+
+ if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Determine allowed clock to data ratio */
+ if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
+ conf->numclks = 8; /* ratio for all compressed images */
+ else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ conf->numclks = 4; /* for uncompressed and encrypted images */
+ else
+ conf->numclks = 1; /* for uncompressed and unencrypted images */
+
+ /* STEP 1 - read CVP status and check CVP_EN flag */
+ pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+ if (!(val & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ if (val & VSE_CVP_STATUS_CFG_RDY) {
+ dev_warn(&mgr->dev, "CvP already started, teardown first\n");
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * STEP 2
+ * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
+ */
+ /* switch from fabric to PMA clock */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* set CVP mode */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /*
+ * STEP 3
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 4 - set CVP_CONFIG bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ /* request control block to begin transfer using CVP */
+ val |= VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
+ VSE_CVP_STATUS_CFG_RDY, 10);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+
+ /*
+ * STEP 6
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 7 - set START_XFER */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val |= VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ return 0;
+}
+
+static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if (val & VSE_CVP_STATUS_CFG_ERR) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ const u32 *data;
+ size_t done, remaining;
+ int status = 0;
+ u32 mask;
+
+ /* STEP 9 - write 32-bit data from RBF file to CVP data register */
+ data = (u32 *)buf;
+ remaining = count;
+ done = 0;
+
+ while (remaining >= 4) {
+ conf->write_data(conf, *data++);
+ done += 4;
+ remaining -= 4;
+
+ /*
+ * STEP 10 (optional) and STEP 11
+ * - check error flag
+ * - loop until data transfer completed
+ * Config images can be huge (more than 40 MiB), so
+ * only check after a new 4k data block has been written.
+ * This reduces the number of checks and speeds up the
+ * configuration process.
+ */
+ if (altera_cvp_chkcfg && !(done % SZ_4K)) {
+ status = altera_cvp_chk_error(mgr, done);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ /* write up to 3 trailing bytes, if any */
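+	/* remaining is 0..3 here: BIT(remaining * 8) - 1 keeps exactly those bytes, and mask 0 skips the write */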
+ mask = BIT(remaining * 8) - 1;
+ if (mask)
+ conf->write_data(conf, *data & mask);
+
+ if (altera_cvp_chkcfg)
+ status = altera_cvp_chk_error(mgr, count);
+
+ return status;
+}
+
+static int altera_cvp_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 mask;
+ u32 val;
+
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+
+ /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
+ pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+ if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
+ dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
+ return -EPROTO;
+ }
+
+ /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
+ mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
+ ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+ if (ret)
+ dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
+
+ return ret;
+}
+
+static const struct fpga_manager_ops altera_cvp_ops = {
+ .state = altera_cvp_state,
+ .write_init = altera_cvp_write_init,
+ .write = altera_cvp_write,
+ .write_complete = altera_cvp_write_complete,
+};
+
+static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
+}
+
+static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kstrtobool(buf, &altera_cvp_chkcfg);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id);
+static void altera_cvp_remove(struct pci_dev *pdev);
+
+#define PCI_VENDOR_ID_ALTERA 0x1172
+
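+/* match any Altera PCI function; probe() checks the CvP VSEC capability before claiming the device */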
+static struct pci_device_id altera_cvp_id_tbl[] = {
+ { PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
+
+static struct pci_driver altera_cvp_driver = {
+ .name = DRV_NAME,
+ .id_table = altera_cvp_id_tbl,
+ .probe = altera_cvp_probe,
+ .remove = altera_cvp_remove,
+};
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct altera_cvp_conf *conf;
+ u16 cmd, val;
+ int ret;
+
+ /*
+ * First check if this is the expected FPGA device. PCI config
+ * space access works without enabling the PCI device, memory
+ * space access is enabled further down.
+ */
+ pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+ if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
+ dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ /*
+	 * Enable memory BAR access. We cannot use pci_enable_device() here
+	 * because it would make the driver unusable with FPGA devices that
+	 * have additional big IOMEM resources (e.g. 4GiB BARs) on 32-bit
+	 * platforms. Such BARs will not have an assigned address range and
+	 * pci_enable_device() will fail, complaining about an unclaimed BAR,
+	 * even though that BAR is not needed for FPGA configuration at all.
+	 * Thus, enable the device via the PCI config space command register.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = pci_request_region(pdev, CVP_BAR, "CVP");
+ if (ret) {
+ dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
+ goto err_disable;
+ }
+
+ conf->pci_dev = pdev;
+ conf->write_data = altera_cvp_write_data_iomem;
+
+ conf->map = pci_iomap(pdev, CVP_BAR, 0);
+ if (!conf->map) {
+ dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
+ conf->write_data = altera_cvp_write_data_config;
+ }
+
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
+ ALTERA_CVP_MGR_NAME, pci_name(pdev));
+
+ ret = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (ret)
+ goto err_unmap;
+
+ ret = driver_create_file(&altera_cvp_driver.driver,
+ &driver_attr_chkcfg);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
+ fpga_mgr_unregister(&pdev->dev);
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+err_disable:
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return ret;
+}
+
+static void altera_cvp_remove(struct pci_dev *pdev)
+{
+ struct fpga_manager *mgr = pci_get_drvdata(pdev);
+ struct altera_cvp_conf *conf = mgr->priv;
+ u16 cmd;
+
+ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+ fpga_mgr_unregister(&pdev->dev);
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+module_pci_driver(altera_cvp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");
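For context, user code reaches this manager through the fpga-mgr core rather than through the ops table directly. A hedged sketch against the 4.14-era fpga-mgr API, with the manager device pointer and image name as placeholders:

	#include <linux/fpga/fpga-mgr.h>

	static int program_cvp(struct device *mgr_dev)	/* hypothetical caller */
	{
		struct fpga_image_info info = {
			/* a compressed image selects the 8:1 clock-to-data ratio above */
			.flags = FPGA_MGR_COMPRESSED_BITSTREAM,
		};
		struct fpga_manager *mgr;
		int ret;

		mgr = fpga_mgr_get(mgr_dev);
		if (IS_ERR(mgr))
			return PTR_ERR(mgr);
		ret = fpga_mgr_firmware_load(mgr, &info, "fpga.rbf");
		fpga_mgr_put(mgr);
		return ret;
	}

Since chkcfg is a driver attribute, it appears under /sys/bus/pci/drivers/altera-cvp/; writing 1 there enables the per-4KiB CVP_CONFIG_ERROR polling in altera_cvp_write().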
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 3066b805f2d0..406d2f10741f 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
/* The L3 REMAP register is write only, so keep a cached value. */
static unsigned int l3_remap_shadow;
-static spinlock_t l3_remap_lock;
+static DEFINE_SPINLOCK(l3_remap_lock);
static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
bool enable)
@@ -143,9 +143,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
int ret;
of_id = of_match_device(altera_fpga_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
priv = (struct altera_hps2fpga_data *)of_id->data;
- priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
+ 0);
if (IS_ERR(priv->bridge_reset)) {
dev_err(dev, "Could not get %s reset control\n", priv->name);
return PTR_ERR(priv->bridge_reset);
@@ -171,8 +177,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
return -EBUSY;
}
- spin_lock_init(&l3_remap_lock);
-
if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
if (enable > 1) {
dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
new file mode 100644
index 000000000000..14f14efdf0d5
--- /dev/null
+++ b/drivers/fpga/altera-ps-spi.c
@@ -0,0 +1,308 @@
+/*
+ * Altera Passive Serial SPI Driver
+ *
+ * Copyright (c) 2017 United Western Technologies, Corporation
+ *
+ * Joshua Clayton <stillcompiling@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Manage Altera FPGA firmware that is loaded over SPI using the passive
+ * serial configuration method.
+ * Firmware must be in binary "rbf" format.
+ * Works on Arria 10, Cyclone V and Stratix V. Should work on Cyclone series.
+ * May work on other Altera FPGAs.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+enum altera_ps_devtype {
+ CYCLONE5,
+ ARRIA10,
+};
+
+struct altera_ps_data {
+ enum altera_ps_devtype devtype;
+ int status_wait_min_us;
+ int status_wait_max_us;
+ int t_cfg_us;
+ int t_st2ck_us;
+};
+
+struct altera_ps_conf {
+ struct gpio_desc *config;
+ struct gpio_desc *confd;
+ struct gpio_desc *status;
+ struct spi_device *spi;
+ const struct altera_ps_data *data;
+ u32 info_flags;
+ char mgr_name[64];
+};
+
+/* | Arria 10 | Cyclone5 | Stratix5 |
+ * t_CF2ST0 | [; 600] | [; 600] | [; 600] |ns
+ * t_CFG | [2;] | [2;] | [2;] |µs
+ * t_STATUS | [268; 3000] | [268; 1506] | [268; 1506] |µs
+ * t_CF2ST1 | [; 3000] | [; 1506] | [; 1506] |µs
+ * t_CF2CK | [3010;] | [1506;] | [1506;] |µs
+ * t_ST2CK | [10;] | [2;] | [2;] |µs
+ * t_CD2UM | [175; 830] | [175; 437] | [175; 437] |µs
+ */
+static struct altera_ps_data c5_data = {
+ /* these values for Cyclone5 are compatible with Stratix5 */
+ .devtype = CYCLONE5,
+ .status_wait_min_us = 268,
+ .status_wait_max_us = 1506,
+ .t_cfg_us = 2,
+ .t_st2ck_us = 2,
+};
+
+static struct altera_ps_data a10_data = {
+ .devtype = ARRIA10,
+ .status_wait_min_us = 268, /* min(t_STATUS) */
+ .status_wait_max_us = 3000, /* max(t_CF2ST1) */
+ .t_cfg_us = 2, /* max { min(t_CFG), max(tCF2ST0) } */
+ .t_st2ck_us = 10, /* min(t_ST2CK) */
+};
+
+static const struct of_device_id of_ef_match[] = {
+ { .compatible = "altr,fpga-passive-serial", .data = &c5_data },
+ { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_ef_match);
+
+static enum fpga_mgr_states altera_ps_state(struct fpga_manager *mgr)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+
+ if (gpiod_get_value_cansleep(conf->status))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline void altera_ps_delay(int delay_us)
+{
+ if (delay_us > 10)
+ usleep_range(delay_us, delay_us + 5);
+ else
+ udelay(delay_us);
+}
+
+static int altera_ps_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ int min, max, waits;
+ int i;
+
+ conf->info_flags = info->flags;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 1);
+
+ /* wait min reset pulse time */
+ altera_ps_delay(conf->data->t_cfg_us);
+
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Status pin failed to show a reset\n");
+ return -EIO;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 0);
+
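+	/* poll nSTATUS in min-sized steps: waits = ceil(max / min), e.g. ceil(3000 / 268) = 12 polls on Arria 10 */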
+ min = conf->data->status_wait_min_us;
+ max = conf->data->status_wait_max_us;
+ waits = max / min;
+ if (max % min)
+ waits++;
+
+ /* wait for max { max(t_STATUS), max(t_CF2ST1) } */
+ for (i = 0; i < waits; i++) {
+ usleep_range(min, min + 10);
+ if (!gpiod_get_value_cansleep(conf->status)) {
+			/* wait for min(t_ST2CK) */
+ altera_ps_delay(conf->data->t_st2ck_us);
+ return 0;
+ }
+ }
+
+ dev_err(&mgr->dev, "Status pin not ready.\n");
+ return -EIO;
+}
+
+static void rev_buf(char *buf, size_t len)
+{
+ u32 *fw32 = (u32 *)buf;
+ size_t extra_bytes = (len & 0x03);
+ const u32 *fw_end = (u32 *)(buf + len - extra_bytes);
+
+ /* set buffer to lsb first */
+ while (fw32 < fw_end) {
+ *fw32 = bitrev8x4(*fw32);
+ fw32++;
+ }
+
+ if (extra_bytes) {
+ buf = (char *)fw_end;
+ while (extra_bytes) {
+ *buf = bitrev8(*buf);
+ buf++;
+ extra_bytes--;
+ }
+ }
+}
+
+static int altera_ps_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ int ret;
+ size_t stride = min_t(size_t, fw_data_end - fw_data, SZ_4K);
+
+ if (!(conf->info_flags & FPGA_MGR_BITSTREAM_LSB_FIRST))
+ rev_buf((char *)fw_data, stride);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
+
+static int altera_ps_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char dummy[] = {0};
+ int ret;
+
+ if (gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Error during configuration.\n");
+ return -EIO;
+ }
+
+ if (!IS_ERR(conf->confd)) {
+ if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+ dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * After CONF_DONE goes high, send two additional falling edges on DCLK
+ * to begin initialization and enter user mode
+ */
+ ret = spi_write(conf->spi, dummy, 1);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error during end sequence: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct fpga_manager_ops altera_ps_ops = {
+ .state = altera_ps_state,
+ .write_init = altera_ps_write_init,
+ .write = altera_ps_write,
+ .write_complete = altera_ps_write_complete,
+};
+
+static int altera_ps_probe(struct spi_device *spi)
+{
+ struct altera_ps_conf *conf;
+ const struct of_device_id *of_id;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ of_id = of_match_device(of_ef_match, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
+
+ conf->data = of_id->data;
+ conf->spi = spi;
+ conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+ if (IS_ERR(conf->config)) {
+ dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+ PTR_ERR(conf->config));
+ return PTR_ERR(conf->config);
+ }
+
+ conf->status = devm_gpiod_get(&spi->dev, "nstat", GPIOD_IN);
+ if (IS_ERR(conf->status)) {
+ dev_err(&spi->dev, "Failed to get status gpio: %ld\n",
+ PTR_ERR(conf->status));
+ return PTR_ERR(conf->status);
+ }
+
+ conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+ if (IS_ERR(conf->confd)) {
+ dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ }
+
+ /* Register manager with unique name */
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
+ dev_driver_string(&spi->dev), dev_name(&spi->dev));
+
+ return fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+}
+
+static int altera_ps_remove(struct spi_device *spi)
+{
+ fpga_mgr_unregister(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id altera_ps_spi_ids[] = {
+ {"cyclone-ps-spi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
+
+static struct spi_driver altera_ps_driver = {
+ .driver = {
+ .name = "altera-ps-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_ef_match),
+ },
+ .id_table = altera_ps_spi_ids,
+ .probe = altera_ps_probe,
+ .remove = altera_ps_remove,
+};
+
+module_spi_driver(altera_ps_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joshua Clayton <stillcompiling@gmail.com>");
+MODULE_DESCRIPTION("Module to load Altera FPGA firmware over SPI");
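A design note on the PS interface: it clocks each byte out LSB first, so altera_ps_write() bit-reverses the buffer via rev_buf() unless the image was prepared that way. A caller holding a pre-reversed bitstream can skip the conversion by setting the flag checked above — sketched with a placeholder manager and image name:

	struct fpga_image_info info = {
		.flags = FPGA_MGR_BITSTREAM_LSB_FIRST,	/* bytes already LSB first */
	};
	int ret = fpga_mgr_firmware_load(mgr, &info, "fpga.rbf");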
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 3b6b2f4182a1..d9ab7c75b14f 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -319,8 +319,8 @@ static int child_regions_with_firmware(struct device_node *overlay)
of_node_put(child_region);
if (ret)
- pr_err("firmware-name not allowed in child FPGA region: %s",
- child_region->full_name);
+ pr_err("firmware-name not allowed in child FPGA region: %pOF",
+ child_region);
return ret;
}
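This is the first of several conversions in this series to the %pOF printk format, which prints a device-tree node's full path and removes the need to touch np->full_name directly. The pattern, in sketch form:

	/* before */ pr_err("registration failed: %s\n", np->full_name);
	/* after  */ pr_err("registration failed: %pOF\n", np);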
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 06432d84cbf8..4ea63d9bd131 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -475,7 +475,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_raw_attr = {
+static const struct bin_attribute fsi_slave_raw_attr = {
.attr = {
.name = "raw",
.mode = 0600,
@@ -499,7 +499,7 @@ static ssize_t fsi_slave_sysfs_term_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_term_attr = {
+static const struct bin_attribute fsi_slave_term_attr = {
.attr = {
.name = "term",
.mode = 0200,
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 98d062fd353e..e13353a2fd7c 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -57,12 +57,6 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
int rc;
uint32_t data;
- data = cpu_to_be32(SCOM_RESET_CMD);
- rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
- sizeof(uint32_t));
- if (rc)
- return rc;
-
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
@@ -186,6 +180,7 @@ static const struct file_operations scom_fops = {
static int scom_probe(struct device *dev)
{
+ uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
@@ -202,6 +197,9 @@ static int scom_probe(struct device *dev)
scom->mdev.parent = dev;
list_add(&scom->link, &scom_devices);
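+	/* reset the SCOM engine once at probe time, replacing the per-write reset removed above */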
+ data = cpu_to_be32(SCOM_RESET_CMD);
+ fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+
return misc_register(&scom->mdev);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 461d6fc3688b..3388d54ba114 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -311,7 +311,7 @@ config GPIO_MOCKUP
depends on GPIOLIB && SYSFS
select GPIO_SYSFS
select GPIOLIB_IRQCHIP
- select IRQ_WORK
+ select IRQ_SIM
help
	  This enables the GPIO Testing driver, which provides a way to test the
	  GPIO subsystem through sysfs (or a char device) and debugfs. GPIO_SYSFS
@@ -450,6 +450,15 @@ config GPIO_TS4800
help
This driver support TS-4800 FPGA GPIO controllers.
+config GPIO_THUNDERX
+ tristate "Cavium ThunderX/OCTEON-TX GPIO"
+ depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
+ depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Say yes here to support the on-chip GPIO lines on the ThunderX
+ and OCTEON-TX families of SoCs.
+
config GPIO_TZ1090
bool "Toumaz Xenif TZ1090 GPIO support"
depends on SOC_TZ1090
@@ -1065,6 +1074,21 @@ config GPIO_TPS65912
help
This driver supports TPS65912 gpio chip
+config GPIO_TPS68470
+ bool "TPS68470 GPIO"
+ depends on MFD_TPS68470
+ help
+ Select this option to enable GPIO driver for the TPS68470
+ chip family.
+	  There are 7 GPIOs and a few sensor-related GPIOs supported
+	  by the TPS68470. While the 7 GPIOs can be configured as
+	  input or output as appropriate, the sensor-related GPIOs
+	  are "output only" GPIOs.
+
+ This driver config is bool, as the GPIO functionality
+ of the TPS68470 must be available before dependent
+ drivers are loaded.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a9fda6c55113..aeb70e9de6f2 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
@@ -121,6 +122,7 @@ obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o
obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index a75511d1ea5d..afbff155a0ba 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(devm_gpiod_get_index);
* @index: index of the GPIO to obtain in the consumer
* @child: firmware node (child of @dev)
* @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
*
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
@@ -271,6 +272,7 @@ EXPORT_SYMBOL(devm_gpiod_get_array_optional);
/**
* devm_gpiod_put - Resource-managed gpiod_put()
+ * @dev: GPIO consumer
* @desc: GPIO descriptor to dispose of
*
* Dispose of a GPIO descriptor obtained with devm_gpiod_get() or
@@ -286,6 +288,7 @@ EXPORT_SYMBOL(devm_gpiod_put);
/**
* devm_gpiod_put_array - Resource-managed gpiod_put_array()
+ * @dev: GPIO consumer
* @descs: GPIO descriptor array to dispose of
*
* Dispose of an array of GPIO descriptors obtained with devm_gpiod_get_array().
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index a6607faf2fdf..6b535ec858cc 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
@@ -31,6 +32,7 @@ struct gen_74x164_chip {
* numbering, store the bytes in reverse order.
*/
u8 buffer[0];
+ struct gpio_desc *gpiod_oe;
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
@@ -126,6 +128,13 @@ static int gen_74x164_probe(struct spi_device *spi)
if (!chip)
return -ENOMEM;
+ chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(chip->gpiod_oe))
+ return PTR_ERR(chip->gpiod_oe);
+
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -164,6 +173,7 @@ static int gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
+ gpiod_set_value_cansleep(chip->gpiod_oe, 0);
gpiochip_remove(&chip->gpio_chip);
mutex_destroy(&chip->lock);
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 16a8951b2bed..6b11f1314248 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -71,7 +71,7 @@ static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
return -EINVAL;
}
-static struct gpio_chip altr_a10sr_gc = {
+static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 17485dc20384..ccc02ed65b3c 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -324,8 +324,8 @@ skip_irq:
return 0;
teardown:
of_mm_gpiochip_remove(&altera_gc->mmchip);
- pr_err("%s: registration failed with status %d\n",
- node->full_name, ret);
+ pr_err("%pOF: registration failed with status %d\n",
+ node, ret);
return ret;
}
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 4ca436e66bdb..bfc53995064a 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -834,7 +834,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(gpio->clk)) {
dev_warn(&pdev->dev,
- "No HPLL clock phandle provided, debouncing disabled\n");
+ "Failed to get clock from devicetree, debouncing disabled\n");
gpio->clk = NULL;
}
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e6489143721a..dd0308cc8bb0 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -339,6 +339,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ int err;
bank->irq_chip.name = dev_name(dev);
bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
@@ -355,8 +356,6 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
dev_warn(dev,
"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
} else {
- int err;
-
/*
* Set wakeup capability before requesting wakeup
* interrupt, so we can process boot-time "wakeups"
@@ -383,8 +382,10 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
if (priv->can_wake)
bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
- gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (err)
+ return err;
gpiochip_set_chained_irqchip(&bank->gc, &bank->irq_chip,
priv->parent_irq, brcmstb_gpio_irq_handler);
@@ -483,7 +484,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
- gc->label = np->full_name;
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
gc->base = gpio_base;
gc->of_gpio_n_cells = 2;
gc->of_xlate = brcmstb_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 65cb359308e3..f75d8443ecaf 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -166,7 +166,7 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
static int ctrl_num, bank_base;
- int gpio, bank;
+ int gpio, bank, ret = 0;
unsigned ngpio, nbank;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
@@ -232,10 +232,23 @@ static int davinci_gpio_probe(struct platform_device *pdev)
for (gpio = 0, bank = 0; gpio < ngpio; gpio += 32, bank++)
chips->regs[bank] = gpio_base + offset_array[bank];
- gpiochip_add_data(&chips->chip, chips);
+ ret = devm_gpiochip_add_data(dev, &chips->chip, chips);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, chips);
- davinci_gpio_irq_setup(pdev);
+ ret = davinci_gpio_irq_setup(pdev);
+ if (ret)
+ goto err;
+
return 0;
+
+err:
+ /* Revert the static variable increments */
+ ctrl_num--;
+ bank_base -= ngpio;
+
+ return ret;
}
/*--------------------------------------------------------------------------*/
@@ -477,8 +490,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
- printk(KERN_ERR "Error %ld getting gpio clock?\n",
- PTR_ERR(clk));
+ dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 8650b2916f87..6f5a7fe9787d 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -76,8 +76,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
}
/* Setup pointers to chip functions */
- gc->label = devm_kstrdup(&pdev->dev, pdev->dev.of_node->full_name,
- GFP_KERNEL);
+ gc->label = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
if (!gc->label) {
ret = -ENOMEM;
goto err0;
@@ -96,8 +95,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
return 0;
err0:
iounmap(regs);
- pr_err("%s: GPIO chip registration failed\n",
- pdev->dev.of_node->full_name);
+ pr_err("%pOF: GPIO chip registration failed\n", pdev->dev.of_node);
return ret;
};
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 7847dd34f86f..6544a16ab02e 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -367,7 +367,7 @@ static int grgpio_probe(struct platform_device *ofdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
- gc->label = np->full_name;
+ gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
gc->base = -1;
err = of_property_read_u32(np, "nbits", &prop);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 45d29e488dbb..d43d0a2cc4c5 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -2,6 +2,7 @@
* GPIO interface for IT87xx Super I/O chips
*
* Author: Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Google, Inc.
*
* Based on it87_wdt.c by Oliver Schuster
* gpio-it8761e.c by Denis Turischev
@@ -39,6 +40,7 @@
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
+#define IT8772_ID 0x8772
/* IO Ports */
#define REG 0x2e
@@ -314,6 +316,7 @@ static int __init it87_gpio_init(void)
break;
case IT8728_ID:
case IT8732_ID:
+ case IT8772_ID:
gpio_ba_reg = 0x62;
it87_gpio->io_size = 8;
it87_gpio->output_base = 0xc8;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 743459d9477d..538bce4b5b42 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -82,7 +82,7 @@ static const struct regmap_irq max77620_gpio_irqs[] = {
},
};
-static struct regmap_irq_chip max77620_gpio_irq_chip = {
+static const struct regmap_irq_chip max77620_gpio_irq_chip = {
.name = "max77620-gpio",
.irqs = max77620_gpio_irqs,
.num_irqs = ARRAY_SIZE(max77620_gpio_irqs),
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index ffb73f688ae1..94d772677ed6 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -168,7 +168,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->clk))
return PTR_ERR(gchip->clk);
- clk_prepare_enable(gchip->clk);
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
spin_lock_init(&gchip->lock);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 74fdce096c26..4b80e996d976 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -391,9 +391,10 @@ static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("ioh_gpio", 1, irq_start, chip->base,
- handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, "ioh_gpio", 1, irq_start,
+ chip->base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -406,10 +407,11 @@ static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_disable = ioh_irq_disable;
ct->chip.irq_enable = ioh_irq_enable;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc, IRQ_MSK(num),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
- return 0;
+ return rv;
}
static int ioh_gpio_probe(struct pci_dev *pdev,
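The gpio-ml-ioh change above, and the gpio-mxc, gpio-mxs, gpio-pch and gpio-sta2x11 changes below, all switch to the resource-managed generic irqchip helpers so the irq chip allocations are released automatically on driver detach instead of leaking. Condensed, under the assumption of a probe function with a struct device *dev in scope:

	struct irq_chip_generic *gc;

	gc = devm_irq_alloc_generic_chip(dev, "my_gpio", 1, irq_start,
					 reg_base, handle_simple_irq);
	if (!gc)
		return -ENOMEM;
	/* ... fill in gc->chip_types[0] ... */
	return devm_irq_setup_generic_chip(dev, gc, IRQ_MSK(num),
					   IRQ_GC_INIT_MASK_CACHE,
					   IRQ_NOREQUEST | IRQ_NOPROBE, 0);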
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index a6565e128f9e..9532d86a82f7 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/irq_work.h>
+#include <linux/irq_sim.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
@@ -47,18 +47,12 @@ enum {
struct gpio_mockup_line_status {
int dir;
bool value;
- bool irq_enabled;
-};
-
-struct gpio_mockup_irq_context {
- struct irq_work work;
- int irq;
};
struct gpio_mockup_chip {
struct gpio_chip gc;
struct gpio_mockup_line_status *lines;
- struct gpio_mockup_irq_context irq_ctx;
+ struct irq_sim irqsim;
struct dentry *dbg_dir;
};
@@ -144,65 +138,11 @@ static int gpio_mockup_name_lines(struct device *dev,
return 0;
}
-static int gpio_mockup_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- return chip->irq_base + offset;
-}
-
-static void gpio_mockup_irqmask(struct irq_data *data)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
-
- chip->lines[data->irq - gc->irq_base].irq_enabled = false;
-}
-
-static void gpio_mockup_irqunmask(struct irq_data *data)
+static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- chip->lines[data->irq - gc->irq_base].irq_enabled = true;
-}
-
-static struct irq_chip gpio_mockup_irqchip = {
- .name = GPIO_MOCKUP_NAME,
- .irq_mask = gpio_mockup_irqmask,
- .irq_unmask = gpio_mockup_irqunmask,
-};
-
-static void gpio_mockup_handle_irq(struct irq_work *work)
-{
- struct gpio_mockup_irq_context *irq_ctx;
-
- irq_ctx = container_of(work, struct gpio_mockup_irq_context, work);
- handle_simple_irq(irq_to_desc(irq_ctx->irq));
-}
-
-static int gpio_mockup_irqchip_setup(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- int irq_base, i;
-
- irq_base = devm_irq_alloc_descs(dev, -1, 0, gc->ngpio, 0);
- if (irq_base < 0)
- return irq_base;
-
- gc->irq_base = irq_base;
- gc->irqchip = &gpio_mockup_irqchip;
-
- for (i = 0; i < gc->ngpio; i++) {
- irq_set_chip(irq_base + i, gc->irqchip);
- irq_set_chip_data(irq_base + i, gc);
- irq_set_handler(irq_base + i, &handle_simple_irq);
- irq_modify_status(irq_base + i,
- IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
- }
-
- init_irq_work(&chip->irq_ctx.work, gpio_mockup_handle_irq);
-
- return 0;
+ return irq_sim_irqnum(&chip->irqsim, offset);
}
static ssize_t gpio_mockup_event_write(struct file *file,
@@ -213,7 +153,6 @@ static ssize_t gpio_mockup_event_write(struct file *file,
struct gpio_mockup_chip *chip;
struct seq_file *sfile;
struct gpio_desc *desc;
- struct gpio_chip *gc;
int rv, val;
rv = kstrtoint_from_user(usr_buf, size, 0, &val);
@@ -226,13 +165,9 @@ static ssize_t gpio_mockup_event_write(struct file *file,
priv = sfile->private;
desc = priv->desc;
chip = priv->chip;
- gc = &chip->gc;
- if (chip->lines[priv->offset].irq_enabled) {
- gpiod_set_value_cansleep(desc, val);
- priv->chip->irq_ctx.irq = gc->irq_base + priv->offset;
- irq_work_queue(&priv->chip->irq_ctx.work);
- }
+ gpiod_set_value_cansleep(desc, val);
+ irq_sim_fire(&chip->irqsim, priv->offset);
return size;
}
@@ -319,7 +254,7 @@ static int gpio_mockup_add(struct device *dev,
return ret;
}
- ret = gpio_mockup_irqchip_setup(dev, chip);
+ ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
if (ret)
return ret;
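The mockup rewrite above delegates fake interrupts to the new irq_sim core (hence the IRQ_SIM select in the Kconfig hunk earlier). Reduced to its three calls, with placeholder names:

	struct irq_sim sim;
	int rv, irq;

	rv = devm_irq_sim_init(dev, &sim, num_lines);	/* allocate num_lines dummy irqs */
	if (rv)
		return rv;
	irq = irq_sim_irqnum(&sim, offset);	/* offset -> virq, for the .to_irq hook */
	irq_sim_fire(&sim, offset);		/* inject an event on that line */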
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 793518a30afe..8c93dec498fa 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -348,8 +348,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
- pr_err("%s: GPIO chip registration failed with status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: GPIO chip registration failed with status %d\n",
+ np, ret);
goto err;
}
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 1b7ce7f85886..6cb67595d15f 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -265,8 +265,8 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
int i;
if (irq < 0) {
- dev_err(dev, "no IRQ line\n");
- return -EINVAL;
+ dev_err(dev, "no IRQ line: %d\n", irq);
+ return irq;
}
if (!pdata || !pdata->gpio_base) {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
edge_cause = mvebu_gpio_read_edge_cause(mvchip);
edge_mask = mvebu_gpio_read_edge_mask(mvchip);
- cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+ cause = (data_in & level_mask) | (edge_cause & edge_mask);
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 92692251ade1..5245a2fe62ae 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -66,6 +66,7 @@ struct mxc_gpio_port {
int irq_high;
struct irq_domain *domain;
struct gpio_chip gc;
+ struct device *dev;
u32 both_edges;
};
@@ -324,29 +325,31 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
u32 gpio_idx = d->hwirq;
+ int ret;
if (enable) {
if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
+ ret = enable_irq_wake(port->irq_high);
else
- enable_irq_wake(port->irq);
+ ret = enable_irq_wake(port->irq);
} else {
if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
+ ret = disable_irq_wake(port->irq_high);
else
- disable_irq_wake(port->irq);
+ ret = disable_irq_wake(port->irq);
}
- return 0;
+ return ret;
}
static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
- port->base, handle_level_irq);
+ gc = devm_irq_alloc_generic_chip(port->dev, "gpio-mxc", 1, irq_base,
+ port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
gc->private = port;
@@ -361,10 +364,11 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
- irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
- IRQ_NOREQUEST, 0);
+ rv = devm_irq_setup_generic_chip(port->dev, gc, IRQ_MSK(32),
+ IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
- return 0;
+ return rv;
}
static void mxc_gpio_get_hw(struct platform_device *pdev)
@@ -418,6 +422,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
+ port->dev = &pdev->dev;
+
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(port->base))
@@ -507,6 +513,7 @@ static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 6ae583f36733..435def22445d 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -66,6 +66,7 @@ struct mxs_gpio_port {
int irq;
struct irq_domain *domain;
struct gpio_chip gc;
+ struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
};
@@ -209,9 +210,10 @@ static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("gpio-mxs", 2, irq_base,
- port->base, handle_level_irq);
+ gc = devm_irq_alloc_generic_chip(port->dev, "gpio-mxs", 2, irq_base,
+ port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
@@ -242,10 +244,11 @@ static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
ct->regs.disable = PINCTRL_IRQEN(port) + MXS_CLR;
ct->handler = handle_level_irq;
- irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
- IRQ_NOREQUEST, 0);
+ rv = devm_irq_setup_generic_chip(port->dev, gc, IRQ_MSK(32),
+ IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
- return 0;
+ return rv;
}
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -304,6 +307,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
if (port->id < 0)
return port->id;
port->devid = (enum mxs_gpio_id) of_id->data;
+ port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
return port->irq;
@@ -379,6 +383,7 @@ static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
.of_match_table = mxs_gpio_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = mxs_gpio_probe,
.id_table = mxs_gpio_ids,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f8c550de6c72..dbf869fb63ce 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1247,6 +1247,8 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (ret) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
return ret;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 4c9e21300a26..1b9dbf691ae7 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -187,10 +187,9 @@ static int pca953x_write_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
{
- __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
+ u16 word = get_unaligned((u16 *)val);
- return i2c_smbus_write_word_data(chip->client,
- reg << 1, (__force u16)word);
+ return i2c_smbus_write_word_data(chip->client, reg << 1, word);
}
static int pca957x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
@@ -241,8 +240,7 @@ static int pca953x_read_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
int ret;
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
- val[0] = (u16)ret & 0xFF;
- val[1] = (u16)ret >> 8;
+ put_unaligned(ret, (u16 *)val);
return ret;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index f6600f8ada52..68c6d0c5a6d1 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -337,9 +337,10 @@ static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("pch_gpio", 1, irq_start, chip->base,
- handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, "pch_gpio", 1, irq_start,
+ chip->base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -351,10 +352,11 @@ static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
ct->chip.irq_unmask = pch_irq_unmask;
ct->chip.irq_set_type = pch_irq_type;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc, IRQ_MSK(num),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
- return 0;
+ return rv;
}
static int pch_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 3d3d6b6645a7..6aaaab79c205 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -405,7 +405,7 @@ static const struct dev_pm_ops pl061_dev_pm_ops = {
};
#endif
-static struct amba_id pl061_ids[] = {
+static const struct amba_id pl061_ids[] = {
{
.id = 0x00041061,
.mask = 0x000fffff,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 832f3e46ba9f..6029899789f3 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -451,7 +451,9 @@ static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
for_each_set_bit(n, &gedr, BITS_PER_LONG) {
loop = 1;
- generic_handle_irq(gpio_to_irq(gpio + n));
+ generic_handle_irq(
+ irq_find_mapping(pchip->irqdomain,
+ gpio + n));
}
}
handled += loop;
@@ -465,9 +467,9 @@ static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
struct pxa_gpio_chip *pchip = d;
if (in_irq == pchip->irq0) {
- generic_handle_irq(gpio_to_irq(0));
+ generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
} else if (in_irq == pchip->irq1) {
- generic_handle_irq(gpio_to_irq(1));
+ generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
} else {
pr_err("%s() unknown irq %d\n", __func__, in_irq);
return IRQ_NONE;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 4a1536a050bc..1f0871553fd2 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -371,6 +371,16 @@ static const struct of_device_id gpio_rcar_of_table[] = {
/* Gen3 GPIO is identical to Gen2. */
.data = &gpio_rcar_info_gen2,
}, {
+ .compatible = "renesas,rcar-gen1-gpio",
+ .data = &gpio_rcar_info_gen1,
+ }, {
+ .compatible = "renesas,rcar-gen2-gpio",
+ .data = &gpio_rcar_info_gen2,
+ }, {
+ .compatible = "renesas,rcar-gen3-gpio",
+ /* Gen3 GPIO is identical to Gen2. */
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
}, {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 9e705162da8d..407359da08f9 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -324,9 +324,11 @@ static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
- chip->reg_base, handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, KBUILD_MODNAME, 1,
+ chip->irq_base,
+ chip->reg_base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -338,8 +340,11 @@ static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
ct->chip.irq_enable = gsta_irq_enable;
/* FIXME: this makes at most 32 interrupts. Request 0 by now */
- irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc,
+ 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */,
+ 0, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ if (rv)
+ return rv;
/* Set up all all 128 interrupts: code from setup_generic_chip */
{
@@ -432,6 +437,7 @@ static int gsta_probe(struct platform_device *dev)
static struct platform_driver sta2x11_gpio_platform_driver = {
.driver = {
.name = "sta2x11-gpio",
+ .suppress_bind_attrs = true,
},
.probe = gsta_probe,
};
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 80b6959ae995..091ffaaec635 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -191,7 +191,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label = of_node_full_name(dn);
+ tb10x_gpio->gc.label =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
tb10x_gpio->gc.parent = &pdev->dev;
tb10x_gpio->gc.owner = THIS_MODULE;
tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 506c6a67c5fc..fbaf974277df 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -67,8 +67,8 @@
struct tegra_gpio_info;
struct tegra_gpio_bank {
- int bank;
- int irq;
+ unsigned int bank;
+ unsigned int irq;
spinlock_t lvl_lock[4];
spinlock_t dbc_lock[4]; /* Lock for updating debounce count register */
#ifdef CONFIG_PM_SLEEP
@@ -112,13 +112,14 @@ static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
return __raw_readl(tgi->regs + reg);
}
-static int tegra_gpio_compose(int bank, int port, int bit)
+static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
+ unsigned int bit)
{
return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
}
static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
- int gpio, int value)
+ unsigned int gpio, u32 value)
{
u32 val;
@@ -128,22 +129,22 @@ static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
tegra_gpio_writel(tgi, val, reg);
}
-static void tegra_gpio_enable(struct tegra_gpio_info *tgi, int gpio)
+static void tegra_gpio_enable(struct tegra_gpio_info *tgi, unsigned int gpio)
{
tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 1);
}
-static void tegra_gpio_disable(struct tegra_gpio_info *tgi, int gpio)
+static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
{
tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
}
-static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
return pinctrl_request_gpio(offset);
}
-static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -151,17 +152,18 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
tegra_gpio_disable(tgi, offset);
}
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
}
-static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- int bval = BIT(GPIO_BIT(offset));
+ unsigned int bval = BIT(GPIO_BIT(offset));
/* If gpio is in output mode then read from the out value */
if (tegra_gpio_readl(tgi, GPIO_OE(tgi, offset)) & bval)
@@ -170,7 +172,8 @@ static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(tegra_gpio_readl(tgi, GPIO_IN(tgi, offset)) & bval);
}
-static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -179,8 +182,9 @@ static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static int tegra_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -190,7 +194,8 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int tegra_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
u32 pin_mask = BIT(GPIO_BIT(offset));
@@ -212,7 +217,7 @@ static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
struct tegra_gpio_bank *bank = &tgi->bank_info[GPIO_BANK(offset)];
unsigned int debounce_ms = DIV_ROUND_UP(debounce, 1000);
unsigned long flags;
- int port;
+ unsigned int port;
if (!debounce_ms) {
tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset),
@@ -250,7 +255,7 @@ static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return tegra_gpio_set_debounce(chip, offset, debounce);
}
-static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -261,7 +266,7 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
}
@@ -270,7 +275,7 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
}
@@ -279,20 +284,18 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
}
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq, port = GPIO_PORT(gpio), lvl_type;
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int port = GPIO_PORT(gpio);
- int lvl_type;
- int val;
unsigned long flags;
+ u32 val;
int ret;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -323,7 +326,7 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
if (ret) {
dev_err(tgi->dev,
- "unable to lock Tegra GPIO %d as IRQ\n", gpio);
+ "unable to lock Tegra GPIO %u as IRQ\n", gpio);
return ret;
}
@@ -351,17 +354,15 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
gpiochip_unlock_as_irq(&tgi->gc, gpio);
}
static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
- int port;
- int pin;
+ unsigned int port, pin, gpio;
bool unmasked = false;
- int gpio;
u32 lvl;
unsigned long sta;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -389,7 +390,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
- generic_handle_irq(gpio_to_irq(gpio + pin));
+ generic_handle_irq(irq_find_mapping(tgi->irq_domain,
+ gpio + pin));
}
}
@@ -404,8 +406,7 @@ static int tegra_gpio_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
- int b;
- int p;
+ unsigned int b, p;
local_irq_save(flags);
@@ -413,7 +414,8 @@ static int tegra_gpio_resume(struct device *dev)
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
+ unsigned int gpio = (b << 5) | (p << 3);
+
tegra_gpio_writel(tgi, bank->cnf[p],
GPIO_CNF(tgi, gpio));
@@ -444,15 +446,15 @@ static int tegra_gpio_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
- int b;
- int p;
+ unsigned int b, p;
local_irq_save(flags);
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
+ unsigned int gpio = (b << 5) | (p << 3);
+
bank->cnf[p] = tegra_gpio_readl(tgi,
GPIO_CNF(tgi, gpio));
bank->out[p] = tegra_gpio_readl(tgi,
@@ -483,7 +485,7 @@ static int tegra_gpio_suspend(struct device *dev)
static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
u32 port, bit, mask;
port = GPIO_PORT(gpio);
@@ -507,14 +509,14 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
static int dbg_gpio_show(struct seq_file *s, void *unused)
{
struct tegra_gpio_info *tgi = s->private;
- int i;
- int j;
+ unsigned int i, j;
for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
- int gpio = tegra_gpio_compose(i, j, 0);
+ unsigned int gpio = tegra_gpio_compose(i, j, 0);
+
seq_printf(s,
- "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+ "%u:%u %02x %02x %02x %02x %02x %02x %06x\n",
i, j,
tegra_gpio_readl(tgi, GPIO_CNF(tgi, gpio)),
tegra_gpio_readl(tgi, GPIO_OE(tgi, gpio)),
@@ -542,7 +544,7 @@ static const struct file_operations debug_fops = {
static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
- (void) debugfs_create_file("tegra_gpio", S_IRUGO,
+ (void) debugfs_create_file("tegra_gpio", 0444,
NULL, tgi, &debug_fops);
}
@@ -566,35 +568,25 @@ static struct lock_class_key gpio_lock_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
- const struct tegra_gpio_soc_config *config;
struct tegra_gpio_info *tgi;
struct resource *res;
struct tegra_gpio_bank *bank;
+ unsigned int gpio, i, j;
int ret;
- int gpio;
- int i;
- int j;
-
- config = of_device_get_match_data(&pdev->dev);
- if (!config) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
if (!tgi)
return -ENODEV;
- tgi->soc = config;
+ tgi->soc = of_device_get_match_data(&pdev->dev);
tgi->dev = &pdev->dev;
- for (;;) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ,
- tgi->bank_count);
- if (!res)
- break;
- tgi->bank_count++;
- }
+ ret = platform_irq_count(pdev);
+ if (ret < 0)
+ return ret;
+
+ tgi->bank_count = ret;
+
if (!tgi->bank_count) {
dev_err(&pdev->dev, "Missing IRQ resource\n");
return -ENODEV;
@@ -626,13 +618,13 @@ static int tegra_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tgi);
- if (config->debounce_supported)
+ if (tgi->soc->debounce_supported)
tgi->gc.set_config = tegra_gpio_set_config;
- tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
+ tgi->bank_info = devm_kcalloc(&pdev->dev, tgi->bank_count,
sizeof(*tgi->bank_info), GFP_KERNEL);
if (!tgi->bank_info)
- return -ENODEV;
+ return -ENOMEM;
tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
tgi->gc.ngpio,
@@ -641,15 +633,15 @@ static int tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
for (i = 0; i < tgi->bank_count; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- dev_err(&pdev->dev, "Missing IRQ resource\n");
- return -ENODEV;
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Missing IRQ resource: %d\n", ret);
+ return ret;
}
bank = &tgi->bank_info[i];
bank->bank = i;
- bank->irq = res->start;
+ bank->irq = ret;
bank->tgi = tgi;
}
@@ -661,6 +653,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
+
tegra_gpio_writel(tgi, 0x00, GPIO_INT_ENB(tgi, gpio));
}
}
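Most of the tegra churn is signed-to-unsigned conversion, but tegra_gpio_compose() is worth seeing round-trip: bank lands in bits 5 and up, port in bits 4:3, bit in bits 2:0. A standalone sketch (the decode shifts are inferred from the compose function; they are not quoted from the driver's GPIO_BANK/GPIO_PORT/GPIO_BIT macros):

#include <stdio.h>

static unsigned int compose(unsigned int bank, unsigned int port,
			    unsigned int bit)
{
	return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
}

int main(void)
{
	unsigned int gpio = compose(3, 2, 5);

	printf("gpio=%u bank=%u port=%u bit=%u\n",
	       gpio, gpio >> 5, (gpio >> 3) & 0x3, gpio & 0x7);
	return 0;
}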
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
new file mode 100644
index 000000000000..57efb251f9c4
--- /dev/null
+++ b/drivers/gpio/gpio-thunderx.c
@@ -0,0 +1,639 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016, 2017 Cavium Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+
+#define GPIO_RX_DAT 0x0
+#define GPIO_TX_SET 0x8
+#define GPIO_TX_CLR 0x10
+#define GPIO_CONST 0x90
+#define GPIO_CONST_GPIOS_MASK 0xff
+#define GPIO_BIT_CFG 0x400
+#define GPIO_BIT_CFG_TX_OE BIT(0)
+#define GPIO_BIT_CFG_PIN_XOR BIT(1)
+#define GPIO_BIT_CFG_INT_EN BIT(2)
+#define GPIO_BIT_CFG_INT_TYPE BIT(3)
+#define GPIO_BIT_CFG_FIL_MASK GENMASK(11, 4)
+#define GPIO_BIT_CFG_FIL_CNT_SHIFT 4
+#define GPIO_BIT_CFG_FIL_SEL_SHIFT 8
+#define GPIO_BIT_CFG_TX_OD BIT(12)
+#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(25, 16)
+#define GPIO_INTR 0x800
+#define GPIO_INTR_INTR BIT(0)
+#define GPIO_INTR_INTR_W1S BIT(1)
+#define GPIO_INTR_ENA_W1C BIT(2)
+#define GPIO_INTR_ENA_W1S BIT(3)
+#define GPIO_2ND_BANK 0x1400
+
+#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
+ (9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))
+
+struct thunderx_gpio;
+
+struct thunderx_line {
+ struct thunderx_gpio *txgpio;
+ unsigned int line;
+ unsigned int fil_bits;
+};
+
+struct thunderx_gpio {
+ struct gpio_chip chip;
+ u8 __iomem *register_base;
+ struct irq_domain *irqd;
+ struct msix_entry *msix_entries; /* per line MSI-X */
+ struct thunderx_line *line_entries; /* per line irq info */
+ raw_spinlock_t lock;
+ unsigned long invert_mask[2];
+ unsigned long od_mask[2];
+ int base_msi;
+};
+
+static unsigned int bit_cfg_reg(unsigned int line)
+{
+ return 8 * line + GPIO_BIT_CFG;
+}
+
+static unsigned int intr_reg(unsigned int line)
+{
+ return 8 * line + GPIO_INTR;
+}
+
+static bool thunderx_gpio_is_gpio_nowarn(struct thunderx_gpio *txgpio,
+ unsigned int line)
+{
+ u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+
+ return (bit_cfg & GPIO_BIT_CFG_PIN_SEL_MASK) == 0;
+}
+
+/*
+ * Check (and WARN) that the pin is available for GPIO. We will not
+ * allow modification of the state of non-GPIO pins from this driver.
+ */
+static bool thunderx_gpio_is_gpio(struct thunderx_gpio *txgpio,
+ unsigned int line)
+{
+ bool rv = thunderx_gpio_is_gpio_nowarn(txgpio, line);
+
+ WARN_RATELIMIT(!rv, "Pin %d not available for GPIO\n", line);
+
+ return rv;
+}
+
+static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ return thunderx_gpio_is_gpio(txgpio, line) ? 0 : -EIO;
+}
+
+static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+ clear_bit(line, txgpio->invert_mask);
+ clear_bit(line, txgpio->od_mask);
+ writeq(txgpio->line_entries[line].fil_bits,
+ txgpio->register_base + bit_cfg_reg(line));
+ raw_spin_unlock(&txgpio->lock);
+ return 0;
+}
+
+static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
+ int value)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ int bank = line / 64;
+ int bank_bit = line % 64;
+
+ void __iomem *reg = txgpio->register_base +
+ (bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);
+
+ writeq(BIT_ULL(bank_bit), reg);
+}
+
+static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
+ int value)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+
+ thunderx_gpio_set(chip, line, value);
+
+ if (test_bit(line, txgpio->invert_mask))
+ bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
+
+ if (test_bit(line, txgpio->od_mask))
+ bit_cfg |= GPIO_BIT_CFG_TX_OD;
+
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
+
+ raw_spin_unlock(&txgpio->lock);
+ return 0;
+}
+
+static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ u64 bit_cfg;
+
+ if (!thunderx_gpio_is_gpio_nowarn(txgpio, line))
+ /*
+ * Say it is input for now to avoid WARNing on
+ * gpiochip_add_data(). We will WARN if someone
+ * requests it or tries to use it.
+ */
+ return 1;
+
+ bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+
+ return !(bit_cfg & GPIO_BIT_CFG_TX_OE);
+}
+
+static int thunderx_gpio_set_config(struct gpio_chip *chip,
+ unsigned int line,
+ unsigned long cfg)
+{
+ bool orig_invert, orig_od, orig_dat, new_invert, new_od;
+ u32 arg, sel;
+ u64 bit_cfg;
+ int bank = line / 64;
+ int bank_bit = line % 64;
+ int ret = -ENOTSUPP;
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+ orig_invert = test_bit(line, txgpio->invert_mask);
+ new_invert = orig_invert;
+ orig_od = test_bit(line, txgpio->od_mask);
+ new_od = orig_od;
+ orig_dat = ((readq(reg) >> bank_bit) & 1) ^ orig_invert;
+ bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+ switch (pinconf_to_config_param(cfg)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ /*
+ * Weird, setting open-drain mode causes signal
+ * inversion. Note this so we can compensate in the
+ * dir_out function.
+ */
+ set_bit(line, txgpio->invert_mask);
+ new_invert = true;
+ set_bit(line, txgpio->od_mask);
+ new_od = true;
+ ret = 0;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ clear_bit(line, txgpio->invert_mask);
+ new_invert = false;
+ clear_bit(line, txgpio->od_mask);
+ new_od = false;
+ ret = 0;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ arg = pinconf_to_config_argument(cfg);
+ if (arg > 1228) { /* 15 * 2^15 * 2.5nS maximum */
+ ret = -EINVAL;
+ break;
+ }
+ arg *= 400; /* scale to 2.5nS clocks. */
+ sel = 0;
+ while (arg > 15) {
+ sel++;
+ arg++; /* always round up */
+ arg >>= 1;
+ }
+ txgpio->line_entries[line].fil_bits =
+ (sel << GPIO_BIT_CFG_FIL_SEL_SHIFT) |
+ (arg << GPIO_BIT_CFG_FIL_CNT_SHIFT);
+ bit_cfg &= ~GPIO_BIT_CFG_FIL_MASK;
+ bit_cfg |= txgpio->line_entries[line].fil_bits;
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ raw_spin_unlock(&txgpio->lock);
+
+ /*
+ * If currently output and OPEN_DRAIN changed, install the new
+ * settings
+ */
+ if ((new_invert != orig_invert || new_od != orig_od) &&
+ (bit_cfg & GPIO_BIT_CFG_TX_OE))
+ ret = thunderx_gpio_dir_out(chip, line, orig_dat ^ new_invert);
+
+ return ret;
+}
+
+static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ int bank = line / 64;
+ int bank_bit = line % 64;
+ u64 read_bits = readq(txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_RX_DAT);
+ u64 masked_bits = read_bits & BIT_ULL(bank_bit);
+
+ if (test_bit(line, txgpio->invert_mask))
+ return masked_bits == 0;
+ else
+ return masked_bits != 0;
+}
+
+static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ int bank;
+ u64 set_bits, clear_bits;
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ for (bank = 0; bank <= chip->ngpio / 64; bank++) {
+ set_bits = bits[bank] & mask[bank];
+ clear_bits = ~bits[bank] & mask[bank];
+ writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
+ writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
+ }
+}
+
+static void thunderx_gpio_irq_ack(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_INTR,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_mask(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1C,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_unmask(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1S,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static int thunderx_gpio_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ u64 bit_cfg;
+
+ irqd_set_trigger_type(data, flow_type);
+
+ bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ irq_set_handler_locked(data, handle_fasteoi_ack_irq);
+ bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
+ } else {
+ irq_set_handler_locked(data, handle_fasteoi_mask_irq);
+ }
+
+ raw_spin_lock(&txgpio->lock);
+ if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
+ bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
+ set_bit(txline->line, txgpio->invert_mask);
+ } else {
+ clear_bit(txline->line, txgpio->invert_mask);
+ }
+ clear_bit(txline->line, txgpio->od_mask);
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
+ raw_spin_unlock(&txgpio->lock);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void thunderx_gpio_irq_enable(struct irq_data *data)
+{
+ irq_chip_enable_parent(data);
+ thunderx_gpio_irq_unmask(data);
+}
+
+static void thunderx_gpio_irq_disable(struct irq_data *data)
+{
+ thunderx_gpio_irq_mask(data);
+ irq_chip_disable_parent(data);
+}
+
+static int thunderx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ struct irq_data *parent_data = data->parent_data;
+ int r;
+
+ r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
+ if (r)
+ return r;
+
+ if (parent_data && parent_data->chip->irq_request_resources) {
+ r = parent_data->chip->irq_request_resources(parent_data);
+ if (r)
+ goto error;
+ }
+
+ return 0;
+error:
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+ return r;
+}
+
+static void thunderx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ struct irq_data *parent_data = data->parent_data;
+
+ if (parent_data && parent_data->chip->irq_release_resources)
+ parent_data->chip->irq_release_resources(parent_data);
+
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+}
+
+/*
+ * Interrupts are chained from underlying MSI-X vectors. We have
+ * these irq_chip functions to be able to handle level triggering
+ * semantics and other acknowledgment tasks associated with the GPIO
+ * mechanism.
+ */
+static struct irq_chip thunderx_gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_enable = thunderx_gpio_irq_enable,
+ .irq_disable = thunderx_gpio_irq_disable,
+ .irq_ack = thunderx_gpio_irq_ack,
+ .irq_mask = thunderx_gpio_irq_mask,
+ .irq_mask_ack = thunderx_gpio_irq_mask_ack,
+ .irq_unmask = thunderx_gpio_irq_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_request_resources = thunderx_gpio_irq_request_resources,
+ .irq_release_resources = thunderx_gpio_irq_release_resources,
+ .irq_set_type = thunderx_gpio_irq_set_type,
+
+ .flags = IRQCHIP_SET_TYPE_MASKED
+};
+
+static int thunderx_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct thunderx_gpio *txgpio = d->host_data;
+
+ if (hwirq >= txgpio->chip.ngpio)
+ return -EINVAL;
+ if (!thunderx_gpio_is_gpio_nowarn(txgpio, hwirq))
+ return -EPERM;
+ return 0;
+}
+
+static int thunderx_gpio_irq_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
+{
+ struct thunderx_gpio *txgpio = d->host_data;
+
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (fwspec->param[0] >= txgpio->chip.ngpio)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct thunderx_line *txline = arg;
+
+ return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
+ &thunderx_gpio_irq_chip, txline);
+}
+
+static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
+ .map = thunderx_gpio_irq_map,
+ .alloc = thunderx_gpio_irq_alloc,
+ .translate = thunderx_gpio_irq_translate
+};
+
+static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ return irq_find_mapping(txgpio->irqd, offset);
+}
+
+static int thunderx_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem * const *tbl;
+ struct device *dev = &pdev->dev;
+ struct thunderx_gpio *txgpio;
+ struct gpio_chip *chip;
+ int ngpio, i;
+ int err = 0;
+
+ txgpio = devm_kzalloc(dev, sizeof(*txgpio), GFP_KERNEL);
+ if (!txgpio)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&txgpio->lock);
+ chip = &txgpio->chip;
+
+ pci_set_drvdata(pdev, txgpio);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device: err %d\n", err);
+ goto out;
+ }
+
+ err = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
+ goto out;
+ }
+
+ tbl = pcim_iomap_table(pdev);
+ txgpio->register_base = tbl[0];
+ if (!txgpio->register_base) {
+ dev_err(dev, "Cannot map PCI resource\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (pdev->subsystem_device == 0xa10a) {
+ /* CN88XX has no GPIO_CONST register */
+ ngpio = 50;
+ txgpio->base_msi = 48;
+ } else {
+ u64 c = readq(txgpio->register_base + GPIO_CONST);
+
+ ngpio = c & GPIO_CONST_GPIOS_MASK;
+ txgpio->base_msi = (c >> 8) & 0xff;
+ }
+
+ txgpio->msix_entries = devm_kzalloc(dev,
+ sizeof(struct msix_entry) * ngpio,
+ GFP_KERNEL);
+ if (!txgpio->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ txgpio->line_entries = devm_kzalloc(dev,
+ sizeof(struct thunderx_line) * ngpio,
+ GFP_KERNEL);
+ if (!txgpio->line_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ngpio; i++) {
+ u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(i));
+
+ txgpio->msix_entries[i].entry = txgpio->base_msi + (2 * i);
+ txgpio->line_entries[i].line = i;
+ txgpio->line_entries[i].txgpio = txgpio;
+ /*
+ * If something has already programmed the pin, use
+ * the existing glitch filter settings, otherwise go
+ * to 400nS.
+ */
+ txgpio->line_entries[i].fil_bits = bit_cfg ?
+ (bit_cfg & GPIO_BIT_CFG_FIL_MASK) : GLITCH_FILTER_400NS;
+
+ if ((bit_cfg & GPIO_BIT_CFG_TX_OE) && (bit_cfg & GPIO_BIT_CFG_TX_OD))
+ set_bit(i, txgpio->od_mask);
+ if (bit_cfg & GPIO_BIT_CFG_PIN_XOR)
+ set_bit(i, txgpio->invert_mask);
+ }
+
+
+ /* Enable all MSI-X for interrupts on all possible lines. */
+ err = pci_enable_msix_range(pdev, txgpio->msix_entries, ngpio, ngpio);
+ if (err < 0)
+ goto out;
+
+ /*
+ * Push GPIO specific irqdomain on hierarchy created as a side
+ * effect of the pci_enable_msix()
+ */
+ txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+ 0, 0, of_node_to_fwnode(dev->of_node),
+ &thunderx_gpio_irqd_ops, txgpio);
+ if (!txgpio->irqd)
+ goto out;
+
+ /* Push on irq_data and the domain for each line. */
+ for (i = 0; i < ngpio; i++) {
+ err = irq_domain_push_irq(txgpio->irqd,
+ txgpio->msix_entries[i].vector,
+ &txgpio->line_entries[i]);
+ if (err < 0)
+ dev_err(dev, "irq_domain_push_irq: %d\n", err);
+ }
+
+ chip->label = KBUILD_MODNAME;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->request = thunderx_gpio_request;
+ chip->base = -1; /* System allocated */
+ chip->can_sleep = false;
+ chip->ngpio = ngpio;
+ chip->get_direction = thunderx_gpio_get_direction;
+ chip->direction_input = thunderx_gpio_dir_in;
+ chip->get = thunderx_gpio_get;
+ chip->direction_output = thunderx_gpio_dir_out;
+ chip->set = thunderx_gpio_set;
+ chip->set_multiple = thunderx_gpio_set_multiple;
+ chip->set_config = thunderx_gpio_set_config;
+ chip->to_irq = thunderx_gpio_to_irq;
+ err = devm_gpiochip_add_data(dev, chip, txgpio);
+ if (err)
+ goto out;
+
+ dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
+ ngpio, chip->base);
+ return 0;
+out:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void thunderx_gpio_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
+
+ for (i = 0; i < txgpio->chip.ngpio; i++)
+ irq_domain_pop_irq(txgpio->irqd,
+ txgpio->msix_entries[i].vector);
+
+ irq_domain_remove(txgpio->irqd);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id thunderx_gpio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA00A) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, thunderx_gpio_id_table);
+
+static struct pci_driver thunderx_gpio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = thunderx_gpio_id_table,
+ .probe = thunderx_gpio_probe,
+ .remove = thunderx_gpio_remove,
+};
+
+module_pci_driver(thunderx_gpio_driver);
+
+MODULE_DESCRIPTION("Cavium Inc. ThunderX/OCTEON-TX GPIO Driver");
+MODULE_LICENSE("GPL");
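The least obvious piece of the new ThunderX driver is the PIN_CONFIG_INPUT_DEBOUNCE branch of thunderx_gpio_set_config(): the requested time is scaled to 2.5 ns clocks, then repeatedly halved (rounding up) until the count fits the 4-bit FIL_CNT field, with FIL_SEL recording the number of halvings. A standalone sketch of just that scaling loop, using an illustrative 100 us request:

#include <stdio.h>

int main(void)
{
	unsigned int arg = 100 * 400;	/* 100 us in 2.5 ns clocks */
	unsigned int sel = 0;

	while (arg > 15) {
		sel++;
		arg++;			/* round up before halving */
		arg >>= 1;
	}
	/* effective window: arg * 2^sel clocks of 2.5 ns each */
	printf("FIL_SEL=%u FIL_CNT=%u -> ~%u ns\n",
	       sel, arg, (arg << sel) * 5 / 2);
	return 0;
}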
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
new file mode 100644
index 000000000000..fa2662f8b026
--- /dev/null
+++ b/drivers/gpio/gpio-tps68470.c
@@ -0,0 +1,176 @@
+/*
+ * GPIO driver for TPS68470 PMIC
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Authors:
+ * Antti Laakso <antti.laakso@intel.com>
+ * Tianshu Qiu <tian.shu.qiu@intel.com>
+ * Jian Xu Zheng <jian.xu.zheng@intel.com>
+ * Yuning Pu <yuning.pu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define TPS68470_N_LOGIC_OUTPUT 3
+#define TPS68470_N_REGULAR_GPIO 7
+#define TPS68470_N_GPIO (TPS68470_N_LOGIC_OUTPUT + TPS68470_N_REGULAR_GPIO)
+
+struct tps68470_gpio_data {
+ struct regmap *tps68470_regmap;
+ struct gpio_chip gc;
+};
+
+static int tps68470_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ unsigned int reg = TPS68470_REG_GPDO;
+ int val, ret;
+
+ if (offset >= TPS68470_N_REGULAR_GPIO) {
+ offset -= TPS68470_N_REGULAR_GPIO;
+ reg = TPS68470_REG_SGPO;
+ }
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret) {
+ dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
+ TPS68470_REG_SGPO);
+ return ret;
+ }
+ return !!(val & BIT(offset));
+}
+
+/* Return 0 if output, 1 if input */
+static int tps68470_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ int val, ret;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return 0;
+
+ ret = regmap_read(regmap, TPS68470_GPIO_CTL_REG_A(offset), &val);
+ if (ret) {
+ dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
+ TPS68470_GPIO_CTL_REG_A(offset));
+ return ret;
+ }
+
+ val &= TPS68470_GPIO_MODE_MASK;
+ return val >= TPS68470_GPIO_MODE_OUT_CMOS ? 0 : 1;
+}
+
+static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ unsigned int reg = TPS68470_REG_GPDO;
+
+ if (offset >= TPS68470_N_REGULAR_GPIO) {
+ reg = TPS68470_REG_SGPO;
+ offset -= TPS68470_N_REGULAR_GPIO;
+ }
+
+ regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+}
+
+static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return 0;
+
+ /* Set the initial value */
+ tps68470_gpio_set(gc, offset, value);
+
+ return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ TPS68470_GPIO_MODE_MASK,
+ TPS68470_GPIO_MODE_OUT_CMOS);
+}
+
+static int tps68470_gpio_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return -EINVAL;
+
+ return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ TPS68470_GPIO_MODE_MASK, 0x00);
+}
+
+static const char *tps68470_names[TPS68470_N_GPIO] = {
+ "gpio.0", "gpio.1", "gpio.2", "gpio.3",
+ "gpio.4", "gpio.5", "gpio.6",
+ "s_enable", "s_idle", "s_resetn",
+};
+
+static int tps68470_gpio_probe(struct platform_device *pdev)
+{
+ struct tps68470_gpio_data *tps68470_gpio;
+ int ret;
+
+ tps68470_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps68470_gpio),
+ GFP_KERNEL);
+ if (!tps68470_gpio)
+ return -ENOMEM;
+
+ tps68470_gpio->tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
+ tps68470_gpio->gc.label = "tps68470-gpio";
+ tps68470_gpio->gc.owner = THIS_MODULE;
+ tps68470_gpio->gc.direction_input = tps68470_gpio_input;
+ tps68470_gpio->gc.direction_output = tps68470_gpio_output;
+ tps68470_gpio->gc.get = tps68470_gpio_get;
+ tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
+ tps68470_gpio->gc.set = tps68470_gpio_set;
+ tps68470_gpio->gc.can_sleep = true;
+ tps68470_gpio->gc.names = tps68470_names;
+ tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
+ tps68470_gpio->gc.base = -1;
+ tps68470_gpio->gc.parent = &pdev->dev;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps68470_gpio->gc,
+ tps68470_gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpio_chip: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, tps68470_gpio);
+
+ return ret;
+}
+
+static struct platform_driver tps68470_gpio_driver = {
+ .driver = {
+ .name = "tps68470-gpio",
+ },
+ .probe = tps68470_gpio_probe,
+};
+
+builtin_platform_driver(tps68470_gpio_driver);
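Every callback in this driver performs the same offset fold before touching the register map: the first seven offsets address the regular GPIOs via GPDO, and the remaining three logic outputs address SGPO after subtracting TPS68470_N_REGULAR_GPIO. A standalone sketch of the fold (register names are plain strings standing in for TPS68470_REG_GPDO/TPS68470_REG_SGPO):

#include <stdio.h>

#define N_REGULAR_GPIO	7
#define N_GPIO		10

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < N_GPIO; offset++) {
		unsigned int bit = offset;
		const char *reg = "GPDO";	/* stand-in for TPS68470_REG_GPDO */

		if (bit >= N_REGULAR_GPIO) {
			bit -= N_REGULAR_GPIO;
			reg = "SGPO";		/* stand-in for TPS68470_REG_SGPO */
		}
		printf("gpio %u -> %s bit %u\n", offset, reg, bit);
	}
	return 0;
}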
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 24f388ed46d4..9b511df5450e 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -35,7 +35,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
/*
* The GPIO "subchip" supports 18 GPIOs which can be configured as
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b780314cdfc9..dadeacf43e0c 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -32,8 +32,6 @@
#include <linux/mfd/twl6040.h>
-static struct gpio_chip twl6040gpo_chip;
-
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index ca958e0f6909..22c5be65051f 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -527,13 +527,12 @@ static void tz1090_gpio_register_banks(struct tz1090_gpio *priv)
ret = of_property_read_u32(node, "reg", &addr);
if (ret) {
- dev_err(priv->dev, "invalid reg on %s\n",
- node->full_name);
+ dev_err(priv->dev, "invalid reg on %pOF\n", node);
continue;
}
if (addr >= 3) {
- dev_err(priv->dev, "index %u in %s out of range\n",
- addr, node->full_name);
+ dev_err(priv->dev, "index %u in %pOF out of range\n",
+ addr, node);
continue;
}
@@ -543,8 +542,7 @@ static void tz1090_gpio_register_banks(struct tz1090_gpio *priv)
ret = tz1090_gpio_bank_probe(&info);
if (ret) {
- dev_err(priv->dev, "failure registering %s\n",
- node->full_name);
+ dev_err(priv->dev, "failure registering %pOF\n", node);
of_node_put(node);
continue;
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 521fbe338589..cbe9e06861de 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,10 +30,16 @@
#define VF610_GPIO_PER_PORT 32
+struct fsl_gpio_soc_data {
+ /* Some SoCs have a Port Data Direction Register (PDDR) */
+ bool have_paddr;
+};
+
struct vf610_gpio_port {
struct gpio_chip gc;
void __iomem *base;
void __iomem *gpio_base;
+ const struct fsl_gpio_soc_data *sdata;
u8 irqc[VF610_GPIO_PER_PORT];
int irq;
};
@@ -43,6 +49,7 @@ struct vf610_gpio_port {
#define GPIO_PCOR 0x08
#define GPIO_PTOR 0x0c
#define GPIO_PDIR 0x10
+#define GPIO_PDDR 0x14
#define PORT_PCR(n) ((n) * 0x4)
#define PORT_PCR_IRQC_OFFSET 16
@@ -61,8 +68,13 @@ struct vf610_gpio_port {
static struct irq_chip vf610_gpio_irq_chip;
+static const struct fsl_gpio_soc_data imx_data = {
+ .have_paddr = true,
+};
+
static const struct of_device_id vf610_gpio_dt_ids[] = {
- { .compatible = "fsl,vf610-gpio" },
+ { .compatible = "fsl,vf610-gpio", .data = NULL, },
+ { .compatible = "fsl,imx7ulp-gpio", .data = &imx_data, },
{ /* sentinel */ }
};
@@ -79,8 +91,18 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
-
- return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio));
+ unsigned long mask = BIT(gpio);
+ void __iomem *addr;
+
+ if (port->sdata && port->sdata->have_paddr) {
+ mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ addr = mask ? port->gpio_base + GPIO_PDOR :
+ port->gpio_base + GPIO_PDIR;
+ return !!(vf610_gpio_readl(addr) & BIT(gpio));
+ } else {
+ return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR)
+ & BIT(gpio));
+ }
}
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -96,12 +118,28 @@ static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
+ u32 val;
+
+ if (port->sdata && port->sdata->have_paddr) {
+ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ val &= ~mask;
+ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ }
+
return pinctrl_gpio_direction_input(chip->base + gpio);
}
static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
+
+ if (port->sdata && port->sdata->have_paddr)
+ vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+
vf610_gpio_set(chip, gpio, value);
return pinctrl_gpio_direction_output(chip->base + gpio);
@@ -216,6 +254,8 @@ static struct irq_chip vf610_gpio_irq_chip = {
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id = of_match_device(vf610_gpio_dt_ids,
+ &pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
@@ -227,6 +267,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
+ port->sdata = of_id->data;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->base = devm_ioremap_resource(dev, iores);
if (IS_ERR(port->base))
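The i.MX7ULP variant gains a Port Data Direction Register, and vf610_gpio_get() consults it to pick the read source: a line configured as output reads back the output data register, while an input reads the input register. A standalone sketch of that selection, with PDDR as a plain variable rather than MMIO:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pddr = 0x0000000f;	/* lines 0-3 configured as outputs */
	unsigned int gpio;

	for (gpio = 0; gpio < 6; gpio++) {
		uint32_t mask = UINT32_C(1) << gpio;

		printf("gpio %u reads %s\n", gpio,
		       (pddr & mask) ? "PDOR" : "PDIR");
	}
	return 0;
}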
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 14b2a62338ea..e8ec0e33a0a9 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -360,8 +360,8 @@ static int xgpio_probe(struct platform_device *pdev)
/* Call the OF gpio helper to setup and register the GPIO device */
status = of_mm_gpiochip_add_data(np, &chip->mmchip, chip);
if (status) {
- pr_err("%s: error in probe function with status %d\n",
- np->full_name, status);
+ pr_err("%pOF: error in probe function with status %d\n",
+ np, status);
return status;
}
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index e23ef7b9451d..3926ce9c2840 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -156,7 +156,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
return -ENXIO;
}
-static struct gpio_chip zevio_gpio_chip = {
+static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
.set = zevio_gpio_set,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index df0851464006..b3cc948a2d8b 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -60,13 +60,13 @@
#define ZYNQ_GPIO_BANK5_PIN_MAX(str) (ZYNQ_GPIO_BANK5_PIN_MIN(str) + \
ZYNQ##str##_GPIO_BANK5_NGPIO - 1)
-
/* Register offsets for the GPIO device */
/* LSW Mask & Data -WO */
#define ZYNQ_GPIO_DATA_LSW_OFFSET(BANK) (0x000 + (8 * BANK))
/* MSW Mask & Data -WO */
#define ZYNQ_GPIO_DATA_MSW_OFFSET(BANK) (0x004 + (8 * BANK))
/* Data Register-RW */
+#define ZYNQ_GPIO_DATA_OFFSET(BANK) (0x040 + (4 * BANK))
#define ZYNQ_GPIO_DATA_RO_OFFSET(BANK) (0x060 + (4 * BANK))
/* Direction mode reg-RW */
#define ZYNQ_GPIO_DIRM_OFFSET(BANK) (0x204 + (0x40 * BANK))
@@ -98,6 +98,19 @@
/* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */
#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
+#define GPIO_QUIRK_DATA_RO_BUG BIT(1)
+
+struct gpio_regs {
+ u32 datamsw[ZYNQMP_GPIO_MAX_BANK];
+ u32 datalsw[ZYNQMP_GPIO_MAX_BANK];
+ u32 dirm[ZYNQMP_GPIO_MAX_BANK];
+ u32 outen[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_en[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_dis[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_type[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_polarity[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_any[ZYNQMP_GPIO_MAX_BANK];
+};
/**
* struct zynq_gpio - gpio device private data structure
@@ -106,6 +119,7 @@
* @clk: clock resource for this controller
* @irq: interrupt for the GPIO device
* @p_data: pointer to platform data
+ * @context: context registers
*/
struct zynq_gpio {
struct gpio_chip chip;
@@ -113,16 +127,18 @@ struct zynq_gpio {
struct clk *clk;
int irq;
const struct zynq_platform_data *p_data;
+ struct gpio_regs context;
};
/**
* struct zynq_platform_data - zynq gpio platform data structure
* @label: string to store in gpio->label
+ * @quirks: flags used to identify the platform
* @ngpio: max number of gpio pins
* @max_bank: maximum number of gpio banks
* @bank_min: this array represents bank's min pin
* @bank_max: this array represents bank's max pin
-*/
+ */
struct zynq_platform_data {
const char *label;
u32 quirks;
@@ -147,6 +163,17 @@ static int zynq_gpio_is_zynq(struct zynq_gpio *gpio)
}
/**
+ * gpio_data_ro_bug - test if HW bug exists or not
+ * @gpio: Pointer to driver data struct
+ *
+ * Return: 0 if bug does not exist, 1 if bug exists.
+ */
+static int gpio_data_ro_bug(struct zynq_gpio *gpio)
+{
+ return !!(gpio->p_data->quirks & GPIO_QUIRK_DATA_RO_BUG);
+}
+
+/**
* zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
* for a given pin in the GPIO device
* @pin_num: gpio pin number within the device
@@ -154,6 +181,7 @@ static int zynq_gpio_is_zynq(struct zynq_gpio *gpio)
* pin
* @bank_pin_num: an output parameter used to return pin number within a bank
* for the given gpio pin
+ * @gpio: gpio device data structure
*
* Returns the bank number and pin offset within the bank.
*/
@@ -166,11 +194,11 @@ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
for (bank = 0; bank < gpio->p_data->max_bank; bank++) {
if ((pin_num >= gpio->p_data->bank_min[bank]) &&
- (pin_num <= gpio->p_data->bank_max[bank])) {
- *bank_num = bank;
- *bank_pin_num = pin_num -
- gpio->p_data->bank_min[bank];
- return;
+ (pin_num <= gpio->p_data->bank_max[bank])) {
+ *bank_num = bank;
+ *bank_pin_num = pin_num -
+ gpio->p_data->bank_min[bank];
+ return;
}
}
@@ -197,9 +225,28 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
- data = readl_relaxed(gpio->base_addr +
- ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
-
+ if (gpio_data_ro_bug(gpio)) {
+ if (zynq_gpio_is_zynq(gpio)) {
+ if (bank_num <= 1) {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_OFFSET(bank_num));
+ }
+ } else {
+ if (bank_num <= 2) {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_OFFSET(bank_num));
+ }
+ }
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ }
return (data >> bank_pin_num) & 1;
}
@@ -263,7 +310,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
* as inputs.
*/
if (zynq_gpio_is_zynq(gpio) && bank_num == 0 &&
- (bank_pin_num == 7 || bank_pin_num == 8))
+ (bank_pin_num == 7 || bank_pin_num == 8))
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
@@ -464,13 +511,14 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
writel_relaxed(int_any,
gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
- if (type & IRQ_TYPE_LEVEL_MASK) {
+ if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_chip_handler_name_locked(irq_data,
- &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
- } else {
+ &zynq_gpio_level_irqchip,
+ handle_fasteoi_irq, NULL);
+ else
irq_set_chip_handler_name_locked(irq_data,
- &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
- }
+ &zynq_gpio_edge_irqchip,
+ handle_level_irq, NULL);
return 0;
}
@@ -530,7 +578,6 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
/**
* zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device
- * @irq: irq number of the gpio bank where interrupt has occurred
* @desc: irq descriptor instance of the 'irq'
*
* This function reads the Interrupt Status Register of each bank to get the
@@ -560,14 +607,73 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static void zynq_gpio_save_context(struct zynq_gpio *gpio)
+{
+ unsigned int bank_num;
+
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ gpio->context.datalsw[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
+ gpio->context.datamsw[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num));
+ gpio->context.dirm[bank_num] = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ gpio->context.int_en[bank_num] = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
+ gpio->context.int_type[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ gpio->context.int_polarity[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ gpio->context.int_any[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ }
+}
+
+static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
+{
+ unsigned int bank_num;
+
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ writel_relaxed(gpio->context.datalsw[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
+ writel_relaxed(gpio->context.datamsw[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num));
+ writel_relaxed(gpio->context.dirm[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_en[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_type[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_polarity[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_any[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ }
+}
+
static int __maybe_unused zynq_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq(pdev, 0);
struct irq_data *data = irq_get_irq_data(irq);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
- if (!irqd_is_wakeup_set(data))
+ if (!irqd_is_wakeup_set(data)) {
+ zynq_gpio_save_context(gpio);
return pm_runtime_force_suspend(dev);
+ }
return 0;
}
@@ -577,9 +683,14 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq(pdev, 0);
struct irq_data *data = irq_get_irq_data(irq);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+ int ret;
- if (!irqd_is_wakeup_set(data))
- return pm_runtime_force_resume(dev);
+ if (!irqd_is_wakeup_set(data)) {
+ ret = pm_runtime_force_resume(dev);
+ zynq_gpio_restore_context(gpio);
+ return ret;
+ }
return 0;
}
@@ -602,7 +713,7 @@ static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev)
return clk_prepare_enable(gpio->clk);
}
-static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int zynq_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
int ret;
@@ -615,7 +726,7 @@ static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset)
return ret < 0 ? ret : 0;
}
-static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void zynq_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
pm_runtime_put(chip->parent);
}
@@ -623,11 +734,12 @@ static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
- zynq_gpio_runtime_resume, NULL)
+ zynq_gpio_runtime_resume, NULL)
};
static const struct zynq_platform_data zynqmp_gpio_def = {
.label = "zynqmp_gpio",
+ .quirks = GPIO_QUIRK_DATA_RO_BUG,
.ngpio = ZYNQMP_GPIO_NR_GPIOS,
.max_bank = ZYNQMP_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(MP),
@@ -646,7 +758,7 @@ static const struct zynq_platform_data zynqmp_gpio_def = {
static const struct zynq_platform_data zynq_gpio_def = {
.label = "zynq_gpio",
- .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ,
+ .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ | GPIO_QUIRK_DATA_RO_BUG,
.ngpio = ZYNQ_GPIO_NR_GPIOS,
.max_bank = ZYNQ_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(),
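The GPIO_QUIRK_DATA_RO_BUG path in zynq_gpio_get_value() avoids the unreliable read-only shadow at 0x060 on the affected banks (above bank 1 on Zynq, above bank 2 on ZynqMP) and reads the read/write DATA register at 0x040 instead. A standalone sketch of the two offset macros, to make the layout concrete:

#include <stdio.h>

#define DATA_OFFSET(bank)	(0x040 + (4 * (bank)))
#define DATA_RO_OFFSET(bank)	(0x060 + (4 * (bank)))

int main(void)
{
	unsigned int bank;

	for (bank = 0; bank < 4; bank++)
		printf("bank %u: DATA 0x%03x DATA_RO 0x%03x\n",
		       bank, DATA_OFFSET(bank), DATA_RO_OFFSET(bank));
	return 0;
}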
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c9b42dd12dfa..4d2113530735 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -61,7 +61,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
#ifdef CONFIG_PINCTRL
/**
* acpi_gpiochip_pin_to_gpio_offset() - translates ACPI GPIO to Linux GPIO
- * @chip: GPIO chip
+ * @gdev: GPIO device
* @pin: ACPI GPIO pin number from GpioIo/GpioInt resource
*
* Function takes ACPI GpioIo/GpioInt pin number as a parameter and
@@ -763,7 +763,7 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function is idempotent, though each time it runs it will configure GPIO
* pin direction according to the flags in GpioInt resource.
*
- * Return: Linux IRQ number (>%0) on success, negative errno on failure.
+ * Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 54ce8dc58ad0..bfcd20699ec8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -78,8 +78,8 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
&gpiospec);
if (ret) {
- pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n",
- __func__, propname, np->full_name, index);
+ pr_debug("%s: can't parse '%s' property of node '%pOF[%d]'\n",
+ __func__, propname, np, index);
return ERR_PTR(ret);
}
@@ -93,8 +93,8 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
- pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n",
- __func__, propname, np->full_name, index,
+ pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
+ __func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
out:
@@ -273,14 +273,13 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
}
/**
- * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
+ * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
- * @np: device node of the GPIO chip
- * @gpio_spec: gpio specifier as found in the device tree
+ * @gpiospec: GPIO specifier as found in the device tree
* @flags: a flags pointer to fill in
*
* This is simple translation function, suitable for the most 1:1 mapped
- * gpio chips. This function performs only one sanity check: whether gpio
+ * GPIO chips. This function performs only one sanity check: whether GPIO
* is less than ngpios (that is specified in the gpio_chip).
*/
int of_gpio_simple_xlate(struct gpio_chip *gc,
@@ -337,7 +336,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
int ret = -ENOMEM;
struct gpio_chip *gc = &mm_gc->gc;
- gc->label = kstrdup(np->full_name, GFP_KERNEL);
+ gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
if (!gc->label)
goto err0;
@@ -362,8 +361,7 @@ err2:
err1:
kfree(gc->label);
err0:
- pr_err("%s: GPIO chip registration failed with status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add_data);
@@ -418,8 +416,8 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
group_names_propname,
index, &name);
if (strlen(name)) {
- pr_err("%s: Group name of numeric GPIO ranges must be the empty string.\n",
- np->full_name);
+ pr_err("%pOF: Group name of numeric GPIO ranges must be the empty string.\n",
+ np);
break;
}
}
@@ -434,14 +432,14 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
} else {
/* npins == 0: special range */
if (pinspec.args[1]) {
- pr_err("%s: Illegal gpio-range format.\n",
- np->full_name);
+ pr_err("%pOF: Illegal gpio-range format.\n",
+ np);
break;
}
if (!group_names) {
- pr_err("%s: GPIO group range requested but no %s property.\n",
- np->full_name, group_names_propname);
+ pr_err("%pOF: GPIO group range requested but no %s property.\n",
+ np, group_names_propname);
break;
}
@@ -452,8 +450,8 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
break;
if (!strlen(name)) {
- pr_err("%s: Group name of GPIO group range cannot be the empty string.\n",
- np->full_name);
+ pr_err("%pOF: Group name of GPIO group range cannot be the empty string.\n",
+ np);
break;
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..3f454eaf2101 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -2,6 +2,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
};
ATTRIBUTE_GROUPS(gpiochip);
+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+ return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -534,8 +540,8 @@ static struct class gpio_class = {
/**
* gpiod_export - export a GPIO through sysfs
- * @gpio: gpio to make available, already requested
- * @direction_may_change: true if userspace may change gpio direction
+ * @desc: GPIO to make available, already requested
+ * @direction_may_change: true if userspace may change GPIO direction
* Context: arch_initcall or later
*
* When drivers want to make a GPIO accessible to userspace after they
@@ -643,7 +649,7 @@ static int match_export(struct device *dev, const void *desc)
* gpiod_export_link - create a sysfs link to an exported GPIO node
* @dev: device under which to create symlink
* @name: name of the symlink
- * @gpio: gpio to create symlink to, already exported
+ * @desc: GPIO to create symlink to, already exported
*
* Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
* node. Caller is responsible for unlinking.
@@ -674,7 +680,7 @@ EXPORT_SYMBOL_GPL(gpiod_export_link);
/**
* gpiod_unexport - reverse effect of gpiod_export()
- * @gpio: gpio to make unavailable
+ * @desc: GPIO to make unavailable
*
* This is implicit on gpiod_free().
*/
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cd003b74512f..eb80dac4e26a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -84,7 +84,12 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
}
/**
- * Convert a GPIO number to its descriptor
+ * gpio_to_desc - Convert a GPIO number to its descriptor
+ * @gpio: global GPIO number
+ *
+ * Returns:
+ * The GPIO descriptor associated with the given GPIO, or %NULL if no GPIO
+ * with the given number exists in the system.
*/
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
@@ -111,7 +116,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
EXPORT_SYMBOL_GPL(gpio_to_desc);
/**
- * Get the GPIO descriptor corresponding to the given hw number for this chip.
+ * gpiochip_get_desc - get the GPIO descriptor corresponding to the given
+ * hardware number for this chip
+ * @chip: GPIO chip
+ * @hwnum: hardware number of the GPIO for this chip
+ *
+ * Returns:
+ * A pointer to the GPIO descriptor or %ERR_PTR(-EINVAL) if no GPIO exists
+ * in the given chip for the specified hardware number.
*/
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
u16 hwnum)
@@ -125,9 +137,14 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
}
/**
- * Convert a GPIO descriptor to the integer namespace.
+ * desc_to_gpio - convert a GPIO descriptor to the integer namespace
+ * @desc: GPIO descriptor
+ *
* This should disappear in the future but is needed since we still
- * use GPIO numbers for error messages and sysfs nodes
+ * use GPIO numbers for error messages and sysfs nodes.
+ *
+ * Returns:
+ * The global GPIO number for the GPIO specified by its descriptor.
*/
int desc_to_gpio(const struct gpio_desc *desc)
{
@@ -254,7 +271,7 @@ static int gpiodev_add_to_list(struct gpio_device *gdev)
return -EBUSY;
}
-/**
+/*
* Convert a GPIO name to its descriptor
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
@@ -878,7 +895,7 @@ out_free_le:
return ret;
}
-/**
+/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1077,11 +1094,9 @@ static void gpiochip_setup_devs(void)
/**
* gpiochip_add_data() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
- * Context: potentially before irqs will work
+ * @data: driver-private data associated with this chip
*
- * Returns a negative errno if the chip can't be registered, such as
- * because the chip->base is invalid or already associated with a
- * different chip. Otherwise it returns zero as a success code.
+ * Context: potentially before irqs will work
*
* When gpiochip_add_data() is called very early during boot, so that GPIOs
* can be freely used, the chip->parent device must be registered before
@@ -1093,6 +1108,11 @@ static void gpiochip_setup_devs(void)
*
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * chip->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
*/
int gpiochip_add_data(struct gpio_chip *chip, void *data)
{
@@ -1287,6 +1307,10 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
+ * @chip: GPIO chip
+ *
+ * Returns:
+ * The per-subdriver data for the chip.
*/
void *gpiochip_get_data(struct gpio_chip *chip)
{
@@ -1370,13 +1394,16 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
 * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
 * @dev: the device pointer to which the gpio_chip belongs
* @chip: the chip to register, with chip->base initialized
- * Context: potentially before irqs will work
+ * @data: driver-private data associated with this chip
*
- * Returns a negative errno if the chip can't be registered, such as
- * because the chip->base is invalid or already associated with a
- * different chip. Otherwise it returns zero as a success code.
+ * Context: potentially before irqs will work
*
 * The gpio chip will automatically be released when the device is unbound.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * chip->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
*/
int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
void *data)
@@ -1422,7 +1449,7 @@ EXPORT_SYMBOL_GPL(devm_gpiochip_remove);
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
- * @callback: Callback function to check gpio_chip
+ * @match: Callback function to check gpio_chip
*
* Similar to bus_find_device. It returns a reference to a gpio_chip as
* determined by a user supplied @match callback. The callback should return
@@ -1604,6 +1631,9 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
{
struct gpio_chip *chip = d->host_data;
+ if (!gpiochip_irqchip_irq_valid(chip, hwirq))
+ return -ENXIO;
+
irq_set_chip_data(irq, chip);
/*
* This lock class tells lockdep that GPIO irqs are in a different
@@ -1670,7 +1700,9 @@ static void gpiochip_irq_relres(struct irq_data *d)
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return irq_find_mapping(chip->irqdomain, offset);
+ if (!gpiochip_irqchip_irq_valid(chip, offset))
+ return -ENXIO;
+ return irq_create_mapping(chip->irqdomain, offset);
}
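(With this hunk, gpiochip_to_irq() creates the irqdomain mapping on first use instead of relying on the pre-allocation loop that a later hunk removes from gpiochip_irqchip_add_key(); invalid lines now fail with -ENXIO in both the map and to_irq paths. A consumer-side sketch, with a hypothetical handler and name:)

	int irq = gpiod_to_irq(desc);	/* lazily creates the mapping */
	if (irq < 0)
		return irq;		/* -ENXIO for lines with no usable IRQ */

	int ret = request_irq(irq, demo_handler, IRQF_TRIGGER_RISING,
			      "demo", NULL);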
/**
@@ -1746,9 +1778,6 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
- bool irq_base_set = false;
- unsigned int offset;
- unsigned irq_base = 0;
if (!gpiochip || !irqchip)
return -EINVAL;
@@ -1774,7 +1803,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
* conflicting triggers. Tell the user, and reset to NONE.
*/
if (WARN(of_node && type != IRQ_TYPE_NONE,
- "%s: Ignoring %d default trigger\n", of_node->full_name, type))
+ "%pOF: Ignoring %d default trigger\n", of_node, type))
type = IRQ_TYPE_NONE;
if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
@@ -1805,25 +1834,6 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irqchip->irq_release_resources = gpiochip_irq_relres;
}
- /*
- * Prepare the mapping since the irqchip shall be orthogonal to
- * any gpiochip calls. If the first_irq was zero, this is
- * necessary to allocate descriptors for all IRQs.
- */
- for (offset = 0; offset < gpiochip->ngpio; offset++) {
- if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
- continue;
- irq_base = irq_create_mapping(gpiochip->irqdomain, offset);
- if (!irq_base_set) {
- /*
- * Store the base into the gpiochip to be used when
- * unmapping the irqs.
- */
- gpiochip->irq_base = irq_base;
- irq_base_set = true;
- }
- }
-
acpi_gpiochip_request_interrupts(gpiochip);
return 0;
@@ -1930,11 +1940,14 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
* gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
* @chip: the gpiochip to add the range for
- * @pinctrl_name: the dev_name() of the pin controller to map to
+ * @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
*/
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
@@ -2179,7 +2192,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
- * @desc: GPIO descriptor to request
+ * @chip: GPIO chip
+ * @hwnum: hardware number of the GPIO for which to request the descriptor
* @label: label for the GPIO
*
* Function allows GPIO chip drivers to request and use their own GPIO
@@ -2187,6 +2201,10 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* function will not increase reference count of the GPIO chip module. This
* allows the GPIO chip module to be unloaded as needed (we assume that the
* GPIO chip driver handles freeing the GPIOs it has requested).
+ *
+ * Returns:
+ * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
+ * code on failure.
*/
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label)
@@ -2368,12 +2386,13 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
- * gpiod_set_debounce - sets @debounce time for a @gpio
- * @gpio: the gpio to set debounce time
- * @debounce: debounce time is microseconds
+ * gpiod_set_debounce - sets @debounce time for a GPIO
+ * @desc: descriptor of the GPIO for which to set debounce time
+ * @debounce: debounce time in microseconds
*
- * returns -ENOTSUPP if the controller does not support setting
- * debounce.
+ * Returns:
+ * 0 on success, %-ENOTSUPP if the controller doesn't support setting the
+ * debounce time.
*/
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
@@ -2981,6 +3000,23 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
/**
+ * gpiod_add_lookup_tables() - register GPIO device consumers
+ * @tables: list of tables of consumers to register
+ * @n: number of tables in the list
+ */
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
+{
+ unsigned int i;
+
+ mutex_lock(&gpio_lookup_lock);
+
+ for (i = 0; i < n; i++)
+ list_add_tail(&tables[i]->list, &gpio_lookup_list);
+
+ mutex_unlock(&gpio_lookup_lock);
+}
+
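(gpiod_add_lookup_tables() batches what gpiod_add_lookup_table() does per table, taking the lookup lock once for the whole list. A hypothetical board-file usage, assuming the usual machine.h lookup macros; all names are illustrative:)

	static struct gpiod_lookup_table leds_gpios = {
		.dev_id = "leds-gpio",
		.table = {
			GPIO_LOOKUP("gpiochip0", 4, "power", GPIO_ACTIVE_HIGH),
			{ }	/* sentinel */
		},
	};

	static struct gpiod_lookup_table *board_tables[] = { &leds_gpios };

	gpiod_add_lookup_tables(board_tables, ARRAY_SIZE(board_tables));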
+/**
* gpiod_set_array_value_cansleep() - assign values to an array of GPIOs
* @array_size: number of elements in the descriptor / value arrays
* @desc_array: array of GPIO descriptors whose values will be assigned
@@ -3322,6 +3358,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* @propname: name of the firmware property representing the GPIO
* @index: index of the GPIO to obtain in the consumer
* @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
* from firmware.
@@ -3330,6 +3367,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
+ * Returns:
* On successful request the GPIO pin is configured in accordance with
* provided @dflags.
*
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index a8be286eff86..d003ccb12781 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -219,7 +219,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/*
* Return the GPIO number of the passed descriptor relative to its chip
*/
-static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
+static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
{
return desc - &desc->gdev->descs[0];
}
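(gpio_chip_hwgpio() relies on each chip's descriptors living in one contiguous descs[] array, so the pointer subtraction yields the hardware offset; making it static inline lets every includer of gpiolib.h use it without the "defined but not used" warning that the __maybe_unused marker papered over. A sketch of the arithmetic, with illustrative values:)

	/* If gdev->descs[] starts at descs[0] and desc == &descs[5],
	 * the subtraction yields hardware offset 5 within the chip. */
	int offset = gpio_chip_hwgpio(desc);	/* desc - &desc->gdev->descs[0] */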
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 24a066e1841c..a8acc197dec3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,7 +33,7 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index faea6349228f..658bac0cdc5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ff7bf1a9f967..103635ab784c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -68,13 +68,16 @@
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
+#include "amdgpu_gart.h"
/*
* Modules parameters.
*/
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
+extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
+extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
@@ -93,6 +96,7 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
@@ -104,6 +108,7 @@ extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
+extern unsigned amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
@@ -369,78 +374,10 @@ struct amdgpu_clock {
};
/*
- * BO.
+ * GEM.
*/
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_va_mapping {
- struct list_head list;
- struct rb_node rb;
- uint64_t start;
- uint64_t last;
- uint64_t __subtree_last;
- uint64_t offset;
- uint64_t flags;
-};
-
-/* bo virtual addresses in a specific vm */
-struct amdgpu_bo_va {
- /* protected by bo being reserved */
- struct list_head bo_list;
- struct dma_fence *last_pt_update;
- unsigned ref_count;
-
- /* protected by vm mutex and spinlock */
- struct list_head vm_status;
-
- /* mappings for this bo_va */
- struct list_head invalids;
- struct list_head valids;
-
- /* constant after initialization */
- struct amdgpu_vm *vm;
- struct amdgpu_bo *bo;
-};
#define AMDGPU_GEM_DOMAIN_MAX 0x3
-
-struct amdgpu_bo {
- /* Protected by tbo.reserved */
- u32 prefered_domains;
- u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
- u64 flags;
- unsigned pin_count;
- void *kptr;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
- unsigned prime_shared_count;
- /* list of all virtual address to which this bo
- * is associated to
- */
- struct list_head va;
- /* Constant after initialization */
- struct drm_gem_object gem_base;
- struct amdgpu_bo *parent;
- struct amdgpu_bo *shadow;
-
- struct ttm_bo_kmap_obj dma_buf_vmap;
- struct amdgpu_mn *mn;
- struct list_head mn_list;
- struct list_head shadow_list;
-};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
@@ -532,49 +469,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GART structures, functions & helpers
- */
-struct amdgpu_mc;
-
-#define AMDGPU_GPU_PAGE_SIZE 4096
-#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
-#define AMDGPU_GPU_PAGE_SHIFT 12
-#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
-
-struct amdgpu_gart {
- dma_addr_t table_addr;
- struct amdgpu_bo *robj;
- void *ptr;
- unsigned num_gpu_pages;
- unsigned num_cpu_pages;
- unsigned table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- struct page **pages;
-#endif
- bool ready;
-
- /* Asic default pte flags */
- uint64_t gart_pte_flags;
-
- const struct amdgpu_gart_funcs *gart_funcs;
-};
-
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
- int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist,
- dma_addr_t *dma_addr, uint64_t flags);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
-
-/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
@@ -598,22 +492,20 @@ struct amdgpu_mc {
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
- u64 gtt_size;
- u64 gtt_start;
- u64 gtt_end;
+ u64 gart_size;
+ u64 gart_start;
+ u64 gart_end;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
- u64 gtt_base_align;
u64 mc_mask;
const struct firmware *fw; /* MC firmware */
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
uint32_t srbm_soft_reset;
- struct amdgpu_mode_mc_save save;
bool prt_warning;
uint64_t stolen_size;
/* apertures */
@@ -719,15 +611,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
	/* Overlap the doorbell assignment with VCN, as they are mutually exclusive.
	 * The VCE engine's doorbell is 32 bit and two VCE rings share one QWORD.
*/
- AMDGPU_DOORBELL64_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_RING6_7 = 0xFB,
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
@@ -857,6 +749,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct amdgpu_bo_va *prt_va;
+ struct amdgpu_bo_va *csa_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
@@ -866,6 +759,14 @@ struct amdgpu_fpriv {
/*
* residency list
*/
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
struct amdgpu_bo_list {
struct mutex lock;
@@ -1159,7 +1060,9 @@ struct amdgpu_cs_parser {
struct list_head validated;
struct dma_fence *fence;
uint64_t bytes_moved_threshold;
+ uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
+ uint64_t bytes_moved_vis;
struct amdgpu_bo_list_entry *evictable;
/* user fence */
@@ -1230,8 +1133,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
@@ -1525,7 +1426,7 @@ struct amdgpu_device {
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stollen_vga_memory;
+ struct amdgpu_bo *stolen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -1557,6 +1458,10 @@ struct amdgpu_device {
spinlock_t gc_cac_idx_lock;
amdgpu_rreg_t gc_cac_rreg;
amdgpu_wreg_t gc_cac_wreg;
+ /* protects concurrent se_cac register access */
+ spinlock_t se_cac_idx_lock;
+ amdgpu_rreg_t se_cac_rreg;
+ amdgpu_wreg_t se_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
@@ -1579,9 +1484,6 @@ struct amdgpu_device {
struct amdgpu_mman mman;
struct amdgpu_vram_scratch vram_scratch;
struct amdgpu_wb wb;
- atomic64_t vram_usage;
- atomic64_t vram_vis_usage;
- atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
atomic64_t num_vram_cpu_page_faults;
@@ -1593,6 +1495,7 @@ struct amdgpu_device {
spinlock_t lock;
s64 last_update_us;
s64 accum_us; /* accumulated microseconds */
+ s64 accum_us_vis; /* for visible VRAM */
u32 log2_max_MBps;
} mm_stats;
@@ -1687,6 +1590,8 @@ struct amdgpu_device {
bool has_hw_reset;
u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+	/* record last mm index being written through WREG32 */
+ unsigned long last_mm_index;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1742,6 +1647,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
+#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
+#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -1792,50 +1699,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
-/*
- * RING helpers.
- */
-static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
-{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- ring->ring[ring->wptr++ & ring->buf_mask] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
-}
-
-static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
-{
- unsigned occupied, chunk1, chunk2;
- void *dst;
-
- if (unlikely(ring->count_dw < count_dw)) {
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- return;
- }
-
- occupied = ring->wptr & ring->buf_mask;
- dst = (void *)&ring->ring[occupied];
- chunk1 = ring->buf_mask + 1 - occupied;
- chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
- chunk2 = count_dw - chunk1;
- chunk1 <<= 2;
- chunk2 <<= 2;
-
- if (chunk1)
- memcpy(dst, src, chunk1);
-
- if (chunk2) {
- src += chunk1;
- dst = (void *)ring->ring;
- memcpy(dst, src, chunk2);
- }
-
- ring->wptr += count_dw;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw -= count_dw;
-}
-
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
@@ -1898,7 +1761,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
@@ -1911,8 +1773,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
-#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
-#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -1927,7 +1787,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
@@ -1943,7 +1804,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 06879d1dcabd..a52795d9b458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -285,19 +285,20 @@ static int acp_hw_init(void *handle)
return 0;
else if (r)
return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+ if (adev->acp.acp_genpd == NULL)
+ return -ENOMEM;
- adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
- if (adev->acp.acp_genpd == NULL)
- return -ENOMEM;
-
- adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
- adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
- adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+ adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+ adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
- pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ }
adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
GFP_KERNEL);
@@ -319,14 +320,29 @@ static int acp_hw_init(void *handle)
return -ENOMEM;
}
- i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ }
i2s_pdata[0].cap = DWC_I2S_PLAY;
i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1 |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1;
+ }
- i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
- DW_I2S_QUIRK_COMP_PARAM1;
i2s_pdata[1].cap = DWC_I2S_RECORD;
i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
- if (r) {
- dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+ if (r) {
+ dev_err(dev, "Failed to add dev to genpd\n");
+ return r;
+ }
}
}
@@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
- if (!adev->acp.acp_genpd)
+ if (!adev->acp.acp_cell)
return 0;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
- /* If removal fails, dont giveup and try rest */
- if (ret)
- dev_err(dev, "remove dev from genpd failed\n");
+ if (adev->acp.acp_genpd) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+			/* If removal fails, don't give up and try the rest */
+ if (ret)
+ dev_err(dev, "remove dev from genpd failed\n");
+ }
+ kfree(adev->acp.acp_genpd);
}
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_genpd);
kfree(adev->acp.acp_cell);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index ef79551b4cb7..57afad79f55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -30,10 +30,10 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
#include "amd_acpi.h"
#include "atom.h"
-extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
/* Call the ATIF method
*/
/**
@@ -289,7 +289,7 @@ out:
* handles it.
* Returns NOTIFY code
*/
-int amdgpu_atif_handler(struct amdgpu_device *adev,
+static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = &adev->atif;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 37971d9402e3..5432af39a674 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -27,16 +27,15 @@
#include "amdgpu_gfx.h"
#include <linux/module.h>
-const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
int amdgpu_amdkfd_init(void)
{
int ret;
#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
@@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void)
return ret;
}
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
+void amdgpu_amdkfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
+ const struct kfd2kgd_calls *kfd2kgd;
+
+ if (!kgd2kfd)
+ return;
+
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
@@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
default:
- return false;
+ dev_info(adev->dev, "kfd not supported on this ASIC\n");
+ return;
}
- return true;
-}
-
-void amdgpu_amdkfd_fini(void)
-{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
-}
-
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
-{
- if (kgd2kfd)
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
}
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
+ &(*mem)->bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 73f83a10ae14..8d689ab7e429 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -26,6 +26,7 @@
#define AMDGPU_AMDKFD_H_INCLUDED
#include <linux/types.h>
+#include <linux/mmu_context.h>
#include <kgd_kfd_interface.h>
struct amdgpu_device;
@@ -39,8 +40,6 @@ struct kgd_mem {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev);
-
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
@@ -62,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+#define read_user_wptr(mmptr, wptr, dst) \
+ ({ \
+ bool valid = false; \
+ if ((mmptr) && (wptr)) { \
+ if ((mmptr) == current->mm) { \
+ valid = !get_user((dst), (wptr)); \
+ } else if (current->mm == NULL) { \
+ use_mm(mmptr); \
+ valid = !get_user((dst), (wptr)); \
+ unuse_mm(mmptr); \
+ } \
+ } \
+ valid; \
+ })
+
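(read_user_wptr() fetches the queue's user-mode write pointer: directly when the caller shares the owning mm, or by temporarily adopting that address space with use_mm()/unuse_mm() when called from a kernel thread; it evaluates to false if either pointer is absent or the copy faults. The caller-side pattern, as used by the kgd_hqd_load() hunks below:)

	uint32_t wptr_val;

	/* mm is the queue owner's address space, captured at queue creation */
	if (read_user_wptr(mm, wptr, wptr_val))
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);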
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5254562fd0f9..b9dbbf9cb8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -39,6 +39,12 @@
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4
@@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because REG_GET_FIELD() is used, we keep this function in the
+ * ASIC-specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
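(Dropping the "++" fixes a real off-by-one: pre-incrementing pipe_id both skewed the MEC computation and mutated the value that the next line's modulo depends on. Worked example with num_pipe_per_mec == 4 and pipe_id == 3:)

	uint32_t mec  = (3 / 4) + 1;	/* 1: stays on the first MEC */
	uint32_t pipe = (3 % 4);	/* 3: the intended pipe      */
	/* Old code: mec = (4 / 4) + 1 == 2 and pipe = 4 % 4 == 0; both wrong. */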
@@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
- if (is_wptr_shadow_valid)
- m->cp_hqd_pq_wptr = wptr_shadow;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v7_0_mqd_commit(adev, m);
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
@@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+	/* Workaround: if the IQ timer is active and the wait time is close to
+	 * or equal to 0, dequeueing is not safe. Wait until either the wait
+	 * time is larger or the timer is cleared. Ensure that IQ_REQ_PEND is
+	 * cleared before continuing, and that wait times are set to at
+	 * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
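(The destroy path now bounds the wait in wall-clock jiffies instead of decrementing a millisecond counter across msleep() calls, and it correctly waits for ACTIVE to clear rather than to be set. A sketch of the idiom, assuming utimeout is in milliseconds as the HZ/1000 conversion implies:)

	unsigned long end_jiffies = jiffies + msecs_to_jiffies(utimeout);

	while (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
		if (time_after(jiffies, end_jiffies))
			return -ETIME;		/* queue never went idle */
		usleep_range(500, 1000);	/* sleep instead of busy-waiting */
	}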
@@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 133d06671e46..309f2419c6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -39,6 +39,12 @@
#include "vi_structs.h"
#include "vid.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
struct cik_sdma_rlc_registers;
/*
@@ -55,12 +61,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
@@ -85,6 +94,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because REG_GET_FIELD() is used, we keep this function in the
+ * ASIC-specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -111,7 +147,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -147,7 +185,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
@@ -216,7 +254,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
uint32_t mec;
uint32_t pipe;
- mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -244,20 +282,67 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
- struct vi_mqd *m;
- uint32_t shadow_wptr, valid_wptr;
struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
- if (valid_wptr == 0)
- m->cp_hqd_pq_wptr = shadow_wptr;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v8_0_mqd_commit(adev, mqd);
+
+	/* HIQ is set up during the driver init period with vmid set to 0 */
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32(mmRLC_CP_SCHEDULERS);
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32(mmRLC_CP_SCHEDULERS, value);
+ }
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
+ * This is safe since EOP RPTR==WPTR for any inactive HQD
+ * on ASICs that do not support context-save.
+ * EOP writes/reads can start anywhere in the ring.
+ */
+ if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ }
+
+ for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
@@ -308,29 +393,102 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
+ struct vi_mqd *m = get_mqd(mqd);
acquire_queue(kgd, pipe_id, queue_id);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+	/* Workaround: if the IQ timer is active and the wait time is close to
+	 * or equal to 0, dequeueing is not safe. Wait until either the wait
+	 * time is larger or the timer is cleared. Ensure that IQ_REQ_PEND is
+	 * cleared before continuing, and that wait times are set to at
+	 * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
@@ -444,6 +602,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -454,42 +622,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1e8e1123ddf4..ce443586a0c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
uint32_t bios_6_scratch;
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
@@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
bios_6_scratch |= ATOM_S6_ACC_MODE;
}
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
- bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0;
+
+ bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
/* let the bios control the backlight */
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
@@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
/* clear the vbios dpms state */
bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
- WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
@@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
int i;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
+ adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
}
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
@@ -1738,20 +1740,30 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
+ WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
}
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
if (hung)
tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
else
tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- WREG32(mmBIOS_SCRATCH_3, tmp);
+ WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+}
+
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
+
+ return !(tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK);
}
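These helpers now key every scratch access off adev->bios_scratch_reg_offset instead of hard-coded mmBIOS_SCRATCH_N registers, so a single code path can serve both the legacy AtomBIOS and the newer atomfirmware layouts. A minimal sketch of the resulting access pattern (the accessor names here are hypothetical, not part of the patch):

	static u32 bios_scratch_rreg(struct amdgpu_device *adev, unsigned int n)
	{
		return RREG32(adev->bios_scratch_reg_offset + n);	/* scratch reg N */
	}

	static void bios_scratch_wreg(struct amdgpu_device *adev, unsigned int n, u32 v)
	{
		WREG32(adev->bios_scratch_reg_offset + n, v);
	}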
/* Atom needs data in little endian format
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 38d0fe32e5cd..b0d5d1d7fdba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 4bdda56fccee..f9ffe8ef0cd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung)
-{
- u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(adev->bios_scratch_reg_offset + 3, tmp);
-}
-
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
@@ -130,3 +95,129 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+union igp_info {
+ struct atom_integrated_system_info_v1_11 v11;
+};
+
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+
+ /* get any igp specific overrides */
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 11:
+ return igp_info->v11.umachannelnumber * 64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+union firmware_info {
+ struct atom_firmware_info_v3_1 v31;
+};
+
+union smu_info {
+ struct atom_smu_info_v3_1 v31;
+};
+
+union umc_info {
+ struct atom_umc_info_v3_1 v31;
+};
+
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct amdgpu_pll *spll = &adev->clock.spll;
+ struct amdgpu_pll *mpll = &adev->clock.mpll;
+ uint8_t frev, crev;
+ uint16_t data_offset;
+ int ret = -EINVAL, index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union firmware_info *firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ adev->clock.default_sclk =
+ le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
+ adev->clock.default_mclk =
+ le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
+
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+
+ /* not technically a clock, but... */
+ adev->mode_info.firmware_flags =
+ le32_to_cpu(firmware_info->v31.firmware_capability);
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union smu_info *smu_info =
+ (union smu_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* system clock */
+ spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
+
+ spll->reference_div = 0;
+ spll->min_post_div = 1;
+ spll->max_post_div = 1;
+ spll->min_ref_div = 2;
+ spll->max_ref_div = 0xff;
+ spll->min_feedback_div = 4;
+ spll->max_feedback_div = 0xff;
+ spll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union umc_info *umc_info =
+ (union umc_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* memory clock */
+ mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
+
+ mpll->reference_div = 0;
+ mpll->min_post_div = 1;
+ mpll->max_post_div = 1;
+ mpll->min_ref_div = 2;
+ mpll->max_ref_div = 0xff;
+ mpll->min_feedback_div = 4;
+ mpll->max_feedback_div = 0xff;
+ mpll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ return ret;
+}
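Every lookup in this function follows the same pattern: resolve the table's index in the master data-table list, parse its header to get the offset, then overlay a revision-specific union on the raw BIOS image. A condensed sketch of that pattern (mytable and some_field are placeholders, not real ATOM table or field names):

	uint16_t data_offset;
	uint8_t frev, crev;
	uint32_t value;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						mytable);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union mytable_info *info = (union mytable_info *)
			(mode_info->atom_context->bios + data_offset);
		/* select the vNN struct variant matching crev before reading */
		value = le32_to_cpu(info->v31.some_field);
	}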
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index a2c3ebe22c71..288b97e54347 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -26,10 +26,8 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 1beae5b930d0..63ec1e1bb6aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false);
+ false, false);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
+ NULL, 0, &sobj);
if (r) {
goto out_cleanup;
}
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
goto out_cleanup;
}
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
+ NULL, 0, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 365e735f6647..c21adf60a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
-static bool is_atom_fw(uint8_t *bios)
-{
- uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8);
- uint8_t frev = bios[bios_header_start + 2];
- uint8_t crev = bios[bios_header_start + 3];
-
- if ((frev < 3) ||
- ((frev == 3) && (crev < 3)))
- return false;
-
- return true;
-}
-
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
@@ -117,7 +104,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap(vram_base, size);
+ bios = ioremap_wc(vram_base, size);
if (!bios) {
return false;
}
@@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
return false;
success:
- adev->is_atom_fw = is_atom_fw(adev->bios);
+ adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10);
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 5e771bc11b00..59089e027f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- kfree(list);
+ amdgpu_bo_list_free(list);
return r;
}
*id = r;
@@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
@@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = !entry->robj->prime_shared_count;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
gws_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
+ const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c0a806280257..fd435a96481c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, NULL,
- &obj);
+ 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
+ r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_DIDT(index);
case CGS_IND_REG_GC_CAC:
return RREG32_GC_CAC(index);
+ case CGS_IND_REG_SE_CAC:
+ return RREG32_SE_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
@@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_DIDT(index, value);
case CGS_IND_REG_GC_CAC:
return WREG32_GC_CAC(index, value);
+ case CGS_IND_REG_SE_CAC:
+ return WREG32_SE_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
@@ -610,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
return 0;
}
+static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
+ bool lock)
+{
+ CGS_FUNC_ADEV;
+
+ if (lock)
+ mutex_lock(&adev->grbm_idx_mutex);
+ else
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -644,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
if (CGS_UCODE_ID_CP_MEC == type)
- info->image_size = (header->jt_offset) << 2;
+ info->image_size = le32_to_cpu(header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
@@ -719,7 +734,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
break;
case CHIP_VEGA10:
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ if ((adev->pdev->device == 0x687f) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc1) ||
+ (adev->pdev->revision == 0xc3)))
+ strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
@@ -1117,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.query_system_info = amdgpu_cgs_query_system_info,
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
+ .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5599c01b265d..60d8bedb694d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
*offset = data->offset;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
amdgpu_bo_unref(&p->uf_entry.robj);
@@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
+ chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
+ chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
+ cdata = u64_to_user_ptr(user_chunk.chunk_data);
p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
* ticks. The accumulated microseconds (us) are converted to bytes and
* returned.
*/
-static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ u64 *max_bytes,
+ u64 *max_vis_bytes)
{
s64 time_us, increment_us;
- u64 max_bytes;
u64 free_vram, total_vram, used_vram;
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
@@ -238,11 +239,14 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps)
- return 0;
+ if (!adev->mm_stats.log2_max_MBps) {
+ *max_bytes = 0;
+ *max_vis_bytes = 0;
+ return;
+ }
total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
- used_vram = atomic64_read(&adev->vram_usage);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -280,23 +284,46 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
}
- /* This returns 0 if the driver is in debt to disallow (optional)
+ /* This is set to 0 if the driver is in debt to disallow (optional)
* buffer moves.
*/
- max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+ *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ /* Do the same for visible VRAM if half of it is free */
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ u64 total_vis_vram = adev->mc.visible_vram_size;
+ u64 used_vis_vram =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+
+ if (used_vis_vram < total_vis_vram) {
+ u64 free_vis_vram = total_vis_vram - used_vis_vram;
+ adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
+ increment_us, us_upper_bound);
+
+ if (free_vis_vram >= total_vis_vram / 2)
+ adev->mm_stats.accum_us_vis =
+ max(bytes_to_us(adev, free_vis_vram / 2),
+ adev->mm_stats.accum_us_vis);
+ }
+
+ *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
+ } else {
+ *max_vis_bytes = 0;
+ }
spin_unlock(&adev->mm_stats.lock);
- return max_bytes;
}
/* Report how many bytes have really been moved for the last command
* submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes)
{
spin_lock(&adev->mm_stats.lock);
adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+ adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
spin_unlock(&adev->mm_stats.lock);
}
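All of these budgets flow through a pair of shift-based converters keyed on log2_max_MBps. Judging from that bookkeeping they reduce to roughly the sketch below (1 MB/s is about 1 byte/us, so the conversion is a plain shift; a reconstruction, not code quoted from the patch):

	static s64 us_to_bytes(struct amdgpu_device *adev, s64 us)
	{
		/* 1 MB/s = 2^20 bytes per 10^6 us, roughly 1 byte per us */
		return us << adev->mm_stats.log2_max_MBps;
	}

	static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
	{
		return bytes >> adev->mm_stats.log2_max_MBps;
	}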
@@ -304,7 +331,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
uint32_t domain;
int r;
@@ -314,17 +341,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
+ if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ /* And don't move a CPU_ACCESS_REQUIRED BO to limited
+ * visible VRAM if we've depleted our allowance to do
+ * that.
+ */
+ if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
+ domain = bo->preferred_domains;
+ else
+ domain = bo->allowed_domains;
+ } else {
+ domain = bo->preferred_domains;
+ }
+ } else {
domain = bo->allowed_domains;
+ }
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+ initial_bytes_moved;
+ p->bytes_moved += bytes_moved;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+ p->bytes_moved_vis += bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -350,7 +395,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
+ bool update_bytes_moved_vis;
uint32_t other;
/* If we reached our current BO we can forget it */
@@ -370,10 +416,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
+ update_bytes_moved_vis =
+ adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
+ p->bytes_moved += bytes_moved;
+ if (update_bytes_moved_vis)
+ p->bytes_moved_vis += bytes_moved;
if (unlikely(r))
break;
@@ -554,8 +607,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+ amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
+ &p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
+ p->bytes_moved_vis = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
@@ -579,8 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate;
}
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
-
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
@@ -619,10 +674,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
- if (r) {
- amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
+ if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
error_free_pages:
@@ -670,21 +723,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* If error is set, then unvalidate the buffer, otherwise just free memory
* used by parsing context.
**/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+ bool backoff)
{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
unsigned i;
- if (!error) {
- amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
- } else if (backoff) {
+ else if (backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- }
for (i = 0; i < parser->num_post_dep_syncobjs; i++)
drm_syncobj_put(parser->post_dep_syncobjs[i]);
@@ -737,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (amdgpu_sriov_vf(adev)) {
struct dma_fence *f;
- bo_va = vm->csa_bo_va;
+
+ bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
@@ -774,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
+ r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -984,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, &fence);
if (r)
return r;
@@ -1028,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
GFP_KERNEL);
p->num_post_dep_syncobjs = 0;
+ if (!p->post_dep_syncobjs)
+ return -ENOMEM;
+
for (i = 0; i < num_deps; ++i) {
p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
if (!p->post_dep_syncobjs[i])
@@ -1099,7 +1153,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
- amdgpu_cs_parser_fini(p, 0, true);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
@@ -1157,10 +1210,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
- if (r)
- goto out;
- return 0;
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
return r;
@@ -1383,7 +1433,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(uintptr_t)(wait->in.fences);
+ fences_user = u64_to_user_ptr(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@@ -1436,7 +1486,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
@@ -1445,7 +1495,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
}
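Several hunks in this file replace open-coded (void __user *)(uintptr_t) casts with u64_to_user_ptr(). The helper centralizes the same double cast and, in kernels of this era, adds a typecheck; its shape is approximately:

	/* approximate shape of the helper from <linux/kernel.h>; the real
	 * macro also typechecks that x is a u64 */
	#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))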
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4a8fc15467cf..e630d918fefc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -53,6 +53,9 @@
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
+#include "amdgpu_vf_error.h"
+
+#include "amdgpu_amdkfd.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -128,6 +131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+ adev->last_mm_index = v;
+ }
+
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
BUG_ON(in_interrupt());
return amdgpu_virt_kiq_wreg(adev, reg, v);
@@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
}
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
@@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+ adev->last_mm_index = v;
+ }
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
@@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
}
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
}
/**
@@ -318,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->vram_scratch.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- return r;
- }
- r = amdgpu_bo_kmap(adev->vram_scratch.robj,
- (void **)&adev->vram_scratch.ptr);
- if (r)
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
-
- return r;
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->vram_scratch.robj,
+ &adev->vram_scratch.gpu_addr,
+ (void **)&adev->vram_scratch.ptr);
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->vram_scratch.robj);
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- }
- amdgpu_bo_unref(&adev->vram_scratch.robj);
+ amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
@@ -521,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
+ /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 bytes = AMDGPU_MAX_WB 256-bit slots */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
@@ -552,32 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
- if (offset < adev->wb.num_wb) {
- __set_bit(offset, adev->wb.used);
- *wb = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-/**
- * amdgpu_wb_get_64bit - Allocate a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Allocate a wb slot for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
-{
- unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
- adev->wb.num_wb, 0, 2, 7, 0);
- if ((offset + 1) < adev->wb.num_wb) {
+ if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- __set_bit(offset + 1, adev->wb.used);
- *wb = offset;
+ *wb = offset * 8; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
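A writeback slot is now 256 bits wide: the used-bitmap tracks whole slots while callers receive a dword index, hence the * 8 (8 dwords = 32 bytes = 256 bits). A hedged sketch of how a caller would address its slot:

	u32 wb;		/* dword index into adev->wb.wb */

	if (!amdgpu_wb_get(adev, &wb)) {
		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;	/* byte address */
		u32 value = adev->wb.wb[wb];			/* CPU-side read */
	}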
@@ -599,22 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
}
/**
- * amdgpu_wb_free_64bit - Free a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Free a wb slot allocated for use by the driver (all asics)
- */
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
-{
- if ((wb + 1) < adev->wb.num_wb) {
- __clear_bit(wb, adev->wb.used);
- __clear_bit(wb + 1, adev->wb.used);
- }
-}
-
-/**
* amdgpu_vram_location - try to find VRAM location
* @adev: amdgpu device structure holding all necessary information
* @mc: memory controller structure holding memory information
@@ -665,7 +611,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_gart_location - try to find GTT location
* @adev: amdgpu device structure holding all necessary information
* @mc: memory controller structure holding memory information
*
@@ -676,28 +622,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size, align the new size on a power of 2.
*/
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
u64 size_af, size_bf;
- size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
- size_bf = mc->vram_start & ~mc->gtt_base_align;
+ size_af = adev->mc.mc_mask - mc->vram_end;
+ size_bf = mc->vram_start;
if (size_bf > size_af) {
- if (mc->gtt_size > size_bf) {
+ if (mc->gart_size > size_bf) {
dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_bf;
+ mc->gart_size = size_bf;
}
- mc->gtt_start = 0;
+ mc->gart_start = 0;
} else {
- if (mc->gtt_size > size_af) {
+ if (mc->gart_size > size_af) {
dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_af;
+ mc->gart_size = size_af;
}
- mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+ mc->gart_start = mc->vram_end + 1;
}
- mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
- mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
/*
@@ -720,7 +666,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
adev->has_hw_reset = false;
return true;
}
- /* then check MEM_SIZE, in case the crtcs are off */
+
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
+
+ /* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
@@ -1031,19 +982,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-/**
- * amdgpu_check_pot_argument - check that argument is a power of two
- *
- * @arg: value to check
- *
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
- */
-static bool amdgpu_check_pot_argument(int arg)
-{
- return (arg & (arg - 1)) == 0;
-}
-
static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
@@ -1077,7 +1015,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
if (amdgpu_vm_size == -1)
return;
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+ if (!is_power_of_2(amdgpu_vm_size)) {
dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
amdgpu_vm_size);
goto def_value;
@@ -1118,19 +1056,31 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
- } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+ } else if (!is_power_of_2(amdgpu_sched_jobs)){
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
- if (amdgpu_gart_size != -1) {
+ if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
+ /* gart size must be greater than or equal to 32M */
+ dev_warn(adev->dev, "gart size (%d) too small\n",
+ amdgpu_gart_size);
+ amdgpu_gart_size = -1;
+ }
+
+ if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
/* gtt size must be greater than or equal to 32M */
- if (amdgpu_gart_size < 32) {
- dev_warn(adev->dev, "gart size (%d) too small\n",
- amdgpu_gart_size);
- amdgpu_gart_size = -1;
- }
+ dev_warn(adev->dev, "gtt size (%d) too small\n",
+ amdgpu_gtt_size);
+ amdgpu_gtt_size = -1;
+ }
+
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+ dev_warn(adev->dev, "valid range is between 4 and 9\n");
+ amdgpu_vm_fragment_size = -1;
}
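A fragment size of n covers 2^n GPU pages, i.e. 4 KiB << n bytes, which is where the 4 = 64K and 9 = 2M endpoints in the module-parameter description come from. A one-line sanity check of that arithmetic (sketch, not patch code):

	/* 4K << 4 = 64K (default), 4K << 9 = 2M (maximum) */
	uint64_t frag_bytes = (uint64_t)AMDGPU_GPU_PAGE_SIZE << amdgpu_vm_fragment_size;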
amdgpu_check_vm_size(adev);
@@ -1138,7 +1088,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
- !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+ !is_power_of_2(amdgpu_vram_page_split))) {
dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
@@ -1901,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
- AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_IP_BLOCK_TYPE_VCE
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2019,7 +1970,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->flags = flags;
adev->asic_type = flags & AMD_ASIC_MASK;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
- adev->mc.gtt_size = 512 * 1024 * 1024;
+ adev->mc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
adev->mman.buffer_funcs = NULL;
@@ -2068,6 +2019,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
+ spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
@@ -2143,6 +2095,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2153,6 +2106,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_vpost_needed(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@@ -2160,18 +2114,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
DRM_INFO("GPU post is not needed\n");
}
- if (!adev->is_atom_fw) {
+ if (adev->is_atom_fw) {
+ /* Initialize clocks */
+ r = amdgpu_atomfirmware_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ } else {
/* Initialize clocks */
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
@@ -2181,6 +2145,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2190,6 +2155,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@@ -2209,6 +2175,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@@ -2253,12 +2220,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
return 0;
failed:
+ amdgpu_vf_error_trans_all(adev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
return r;
@@ -2351,6 +2320,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
}
drm_modeset_unlock_all(dev);
+ amdgpu_amdkfd_suspend(adev);
+
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -2392,10 +2363,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2444,10 +2412,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
if (amdgpu_need_post(adev)) {
@@ -2490,6 +2455,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
}
}
+ r = amdgpu_amdkfd_resume(adev);
+ if (r)
+ return r;
/* blat the mode back in */
if (fbcon) {
@@ -2654,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
goto err;
}
- r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
- if (r) {
- DRM_ERROR("%p bind failed\n", bo->shadow);
- goto err;
- }
-
r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
NULL, fence, true);
if (r) {
@@ -2860,21 +2822,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
r = amdgpu_suspend(adev);
retry:
- /* Disable fb access */
- if (adev->mode_info.num_crtc) {
- struct amdgpu_mode_mc_save save;
- amdgpu_display_stop_mc_access(adev, &save);
- amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
- }
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2952,6 +2902,7 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@@ -2962,12 +2913,16 @@ out:
drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r)
+ if (r) {
/* bad news, how do we tell userspace? */
dev_info(adev->dev, "GPU reset failed\n");
- else
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
dev_info(adev->dev, "GPU reset succeeded!\n");
+ }
+ amdgpu_vf_error_trans_all(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index cdf2ab20166a..6ad243293a78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
+ drm_gem_object_put_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
@@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b59f37c83fa6..0f16986ec5bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -68,13 +68,16 @@
* - 3.16.0 - Add reserved vmid support
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
+ * - 3.19.0 - Add support for UVD MJPEG decode
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 18
+#define KMS_DRIVER_MINOR 19
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
+int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
+int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
@@ -92,6 +95,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
+int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
@@ -106,6 +110,7 @@ unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
+unsigned amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff;
@@ -120,8 +125,14 @@ int amdgpu_lbpw = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
-module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
+module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
+module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+
+MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
+module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
@@ -174,6 +185,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc.; 4 = 64K (default), max 9 = 2M)");
+module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
@@ -186,7 +200,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both)");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
-MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
@@ -199,7 +213,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
module_param_named(no_evict, amdgpu_no_evict, int, 0444);
@@ -219,6 +233,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
+module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
@@ -803,7 +820,6 @@ static struct drm_driver kms_driver = {
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .set_busid = drm_pci_set_busid,
.unload = amdgpu_driver_unload_kms,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = amdgpu_enable_vblank_kms,
@@ -823,7 +839,6 @@ static struct drm_driver kms_driver = {
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &amdgpu_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c0d8c6ff6380..9afa9c097e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
@@ -245,13 +245,12 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = abo->kptr;
+ info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -281,7 +280,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -312,31 +311,7 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
return 0;
}
-/** Sets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- amdgpu_crtc->lut_r[regno] = red >> 6;
- amdgpu_crtc->lut_g[regno] = green >> 6;
- amdgpu_crtc->lut_b[regno] = blue >> 6;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- *red = amdgpu_crtc->lut_r[regno] << 6;
- *green = amdgpu_crtc->lut_g[regno] << 6;
- *blue = amdgpu_crtc->lut_b[regno] << 6;
-}
-
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
- .gamma_set = amdgpu_crtc_fb_gamma_set,
- .gamma_get = amdgpu_crtc_fb_gamma_get,
.fb_probe = amdgpufb_create,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index a57abc1a25fb..f4370081f6e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -55,6 +55,7 @@
/*
* Common GART table functions.
*/
+
/**
* amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
*
@@ -131,7 +132,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
+ NULL, NULL, 0, &adev->gart.robj);
if (r) {
return r;
}
@@ -263,6 +264,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst)
+{
+ uint64_t page_base;
+ unsigned i, j, t;
+
+ if (!adev->gart.ready) {
+ WARN(1, "trying to bind memory to uninitialized GART !\n");
+ return -EINVAL;
+ }
+
+ t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+ for (i = 0; i < pages; i++) {
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
@@ -279,31 +315,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
- unsigned t;
- unsigned p;
- uint64_t page_base;
- int i, j;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ unsigned i, t, p;
+#endif
+ int r;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-
- for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ for (i = 0; i < pages; i++, p++)
adev->gart.pages[p] = pagelist[i];
#endif
- if (adev->gart.ptr) {
- page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
- page_base += AMDGPU_GPU_PAGE_SIZE;
- }
- }
+
+ if (adev->gart.ptr) {
+ r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+ if (r)
+ return r;
}
+
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
return 0;
@@ -333,8 +368,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
if (r)
return r;
/* Compute table size */
- adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
- adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+ adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
+ adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
new file mode 100644
index 000000000000..afbe803b1a13
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GART_H__
+#define __AMDGPU_GART_H__
+
+#include <linux/types.h>
+
+/*
+ * GART structures, functions & helpers
+ */
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_gart_funcs;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+struct amdgpu_gart {
+ dma_addr_t table_addr;
+ struct amdgpu_bo *robj;
+ void *ptr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+ unsigned table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ struct page **pages;
+#endif
+ bool ready;
+
+ /* Asic default pte flags */
+ uint64_t gart_pte_flags;
+
+ const struct amdgpu_gart_funcs *gart_funcs;
+};
+
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_gart_init(struct amdgpu_device *adev);
+void amdgpu_gart_fini(struct amdgpu_device *adev);
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ int pages);
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst);
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr, uint64_t flags);
+
+#endif
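Note that AMDGPU_GPU_PAGE_MASK above is defined as (SIZE - 1), the inverse of the kernel's usual PAGE_MASK convention, which is why AMDGPU_GPU_PAGE_ALIGN masks with ~AMDGPU_GPU_PAGE_MASK. A quick standalone check of the rounding:

#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 10000 };

	for (int i = 0; i < 4; i++)
		printf("%lu -> %lu\n", sizes[i],
		       (unsigned long)AMDGPU_GPU_PAGE_ALIGN(sizes[i]));
	return 0;	/* prints 4096, 4096, 8192, 12288 */
}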
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 621f739103a6..7171968f261e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *robj;
- unsigned long max_size;
int r;
*obj = NULL;
@@ -58,20 +57,9 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
- if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
- /* Maximum bo size is the unpinned gtt size since we use the gtt to
- * handle vram to system pool migrations.
- */
- max_size = adev->mc.gtt_size - adev->gart_pin_size;
- if (size > max_size) {
- DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
- size >> 20, max_size >> 20);
- return -ENOMEM;
- }
- }
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, &robj);
+ flags, NULL, NULL, 0, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -103,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
@@ -237,9 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED|
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ AMDGPU_GEM_CREATE_VRAM_CLEARED))
return -EINVAL;
/* reject invalid gem domains */
@@ -275,7 +261,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -318,7 +304,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return r;
bo = gem_to_amdgpu_bo(gobj);
- bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
if (r)
@@ -353,7 +339,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -367,7 +353,7 @@ unlock_mmap_sem:
up_read(&current->mm->mmap_sem);
release_object:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -386,11 +372,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -460,7 +446,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
} else
r = ret;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -503,7 +489,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
amdgpu_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -635,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -655,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -676,7 +662,7 @@ error_backoff:
ttm_eu_backoff_reservation(&ticket, &list);
error_unref:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -701,11 +687,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(uintptr_t)args->value;
+ void __user *out = u64_to_user_ptr(args->value);
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
- info.domains = robj->prefered_domains;
+ info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
amdgpu_bo_unreserve(robj);
if (copy_to_user(out, &info, sizeof(info)))
@@ -723,10 +709,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
- robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+ robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
- robj->allowed_domains = robj->prefered_domains;
+ robj->allowed_domains = robj->preferred_domains;
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -738,7 +724,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
}
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -766,7 +752,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
@@ -784,6 +770,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
unsigned domain;
const char *placement;
unsigned pin_count;
+ uint64_t offset;
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
switch (domain) {
@@ -798,9 +785,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
placement = " CPU";
break;
}
- seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
- id, amdgpu_bo_size(bo), placement,
- amdgpu_bo_gpu_offset(bo));
+ seq_printf(m, "\t0x%08x: %12ld byte %s",
+ id, amdgpu_bo_size(bo), placement);
+
+ offset = ACCESS_ONCE(bo->tbo.mem.start);
+ if (offset != AMDGPU_BO_INVALID_OFFSET)
+ seq_printf(m, " @ 0x%010Lx", offset);
pin_count = ACCESS_ONCE(bo->pin_count);
if (pin_count)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e26108aad3fe..4f6c68fc1dd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
- if (adev->gfx.mec.num_mec > 1) {
+ /* FIXME: spreading the queues across pipes causes perf regressions */
+ if (0) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7d22c44034d..0d15eb7d31d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -28,7 +28,7 @@
struct amdgpu_gtt_mgr {
struct drm_mm mm;
spinlock_t lock;
- uint64_t available;
+ atomic64_t available;
};
/**
@@ -42,15 +42,19 @@ struct amdgpu_gtt_mgr {
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr;
+ uint64_t start, size;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
- drm_mm_init(&mgr->mm, 0, p_size);
+ start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
+ drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- mgr->available = p_size;
+ atomic64_set(&mgr->available, p_size);
man->priv = mgr;
return 0;
}
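The manager now carves the first AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS pages out of the drm_mm range, keeping them free for buffer-move transfer windows. With hypothetical values for those two constants and a 256 MiB GART, the arithmetic looks like:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* hypothetical values; the real constants are defined elsewhere */
	uint64_t max_transfer_pages = 512, num_windows = 2;
	uint64_t gart_size = 256ULL << 20;	/* 256 MiB GART */

	uint64_t start = max_transfer_pages * num_windows;
	uint64_t size = (gart_size >> PAGE_SHIFT) - start;

	printf("drm_mm manages pages [%llu, %llu)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;	/* [1024, 65536) with these numbers */
}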
@@ -81,6 +85,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
+ * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ *
+ * @mem: the mem object to check
+ *
+ * Check if a mem object already has address space allocated.
+ */
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+{
+ struct drm_mm_node *node = mem->mm_node;
+
+ return (node->start != AMDGPU_BO_INVALID_OFFSET);
+}
+
+/**
* amdgpu_gtt_mgr_alloc - allocate new ranges
*
* @man: TTM memory type manager
@@ -90,18 +108,19 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
*
* Allocate the address space for a node.
*/
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
struct drm_mm_node *node = mem->mm_node;
enum drm_mm_insert_mode mode;
unsigned long fpfn, lpfn;
int r;
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ if (amdgpu_gtt_mgr_is_allocated(mem))
return 0;
if (place)
@@ -112,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
if (place && place->lpfn)
lpfn = place->lpfn;
else
- lpfn = man->size;
+ lpfn = adev->gart.num_cpu_pages;
mode = DRM_MM_INSERT_BEST;
if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
@@ -124,25 +143,12 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
fpfn, lpfn, mode);
spin_unlock(&mgr->lock);
- if (!r) {
+ if (!r)
mem->start = node->start;
- if (&tbo->mem == mem)
- tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
- tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
- }
return r;
}
-void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
-
- seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
- man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
-
-}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*
@@ -163,11 +169,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (mgr->available < mem->num_pages) {
+ if (atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
- mgr->available -= mem->num_pages;
+ atomic64_sub(mem->num_pages, &mgr->available);
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -194,9 +200,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return 0;
err_out:
- spin_lock(&mgr->lock);
- mgr->available += mem->num_pages;
- spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
return r;
}
@@ -223,30 +227,47 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
spin_lock(&mgr->lock);
if (node->start != AMDGPU_BO_INVALID_OFFSET)
drm_mm_remove_node(node);
- mgr->available += mem->num_pages;
spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
kfree(node);
mem->mm_node = NULL;
}
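Making `available` an atomic64_t lets the release paths (the del callback above and the error path in amdgpu_gtt_mgr_new) give pages back without taking mgr->lock; only the check-and-subtract in the allocation path still runs under the spinlock so the pair stays atomic. A small userspace sketch of the same accounting, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_llong available;

/* check-and-subtract; the driver keeps this pair under a spinlock */
static int reserve_pages(long long num_pages)
{
	if (atomic_load(&available) < num_pages)
		return -1;
	atomic_fetch_sub(&available, num_pages);
	return 0;
}

/* release needs no lock at all once the counter is atomic */
static void release_pages(long long num_pages)
{
	atomic_fetch_add(&available, num_pages);
}

int main(void)
{
	atomic_store(&available, 1024);
	if (!reserve_pages(256))
		printf("available: %lld\n", (long long)atomic_load(&available));
	release_pages(256);
	return 0;	/* prints "available: 768" */
}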
/**
+ * amdgpu_gtt_mgr_usage - return usage of GTT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are used in the GTT domain
+ */
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+}
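amdgpu_gtt_mgr_usage() derives usage from the same counter: used pages are man->size minus available, scaled to bytes. For example, with a 1 GiB GTT (262144 pages of 4 KiB) and 200000 pages still available, usage = (262144 - 200000) * 4096 = 254541824 bytes, which the debug dump below prints as 242 MB after the >> 20 shift.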
+
+/**
* amdgpu_gtt_mgr_debug - dump the GTT manager table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ man->size, (u64)atomic64_read(&mgr->available),
+ amdgpu_gtt_mgr_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f774b3f497d2..659997bfff30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
unsigned i;
int r = 0;
+ bool need_pipe_sync = false;
if (num_ibs == 0)
return -EINVAL;
@@ -165,15 +166,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
- amdgpu_ring_emit_pipeline_sync(ring);
+ need_pipe_sync = true;
dma_fence_put(tmp);
}
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
- if (vm) {
- r = amdgpu_vm_flush(ring, job);
+ if (job) {
+ r = amdgpu_vm_flush(ring, job, need_pipe_sync);
if (r) {
amdgpu_ring_undo(ring);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 62da6c5c6095..538e5f27d120 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -220,6 +220,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int r = 0;
spin_lock_init(&adev->irq.lock);
+
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ adev->ddev->vblank_disable_immediate = true;
+
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r) {
return r;
@@ -263,7 +268,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
{
unsigned i, j;
- drm_vblank_cleanup(adev->ddev);
if (adev->irq.installed) {
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 3d641e10e6b6..4510627ae83e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
if (r)
kfree(*job);
+ else
+ (*job)->vm_pd_addr = adev->gart.table_addr;
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b0b23101d1c8..e16229000a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- amdgpu_amdkfd_load_interface(adev);
amdgpu_amdkfd_device_probe(adev);
amdgpu_amdkfd_device_init(adev);
@@ -456,13 +455,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_usage);
+ ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_vis_usage);
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = atomic64_read(&adev->gtt_usage);
+ ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -485,7 +484,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
- vram_gtt.gtt_size = adev->mc.gtt_size;
+ vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -497,7 +497,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.vram.total_heap_size = adev->mc.real_vram_size;
mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size;
- mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.heap_usage =
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -506,14 +507,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
adev->mc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage =
- atomic64_read(&adev->vram_vis_usage);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
- mem.gtt.total_heap_size = adev->mc.gtt_size;
- mem.gtt.usable_heap_size =
- adev->mc.gtt_size - adev->gart_pin_size;
- mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size *= PAGE_SIZE;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+ - adev->gart_pin_size;
+ mem.gtt.heap_usage =
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -571,8 +574,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
- dev_info.max_engine_clock = adev->pm.default_sclk * 10;
- dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+ dev_info.max_engine_clock = adev->clock.default_sclk * 10;
+ dev_info.max_memory_clock = adev->clock.default_mclk * 10;
}
dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
@@ -587,10 +590,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
- dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
- AMDGPU_GPU_PAGE_SIZE;
+ dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
-
dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
@@ -839,7 +840,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm);
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r)
goto out_suspend;
}
@@ -892,8 +893,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
- fpriv->vm.csa_bo_va = NULL;
+ amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+ fpriv->csa_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 6558a3ed57a7..3b0f2ec6eec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -51,7 +51,7 @@ struct amdgpu_mn {
/* objects protected by lock */
struct mutex lock;
- struct rb_root objects;
+ struct rb_root_cached objects;
};
struct amdgpu_mn_node {
@@ -76,8 +76,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
mutex_lock(&adev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
- it.rb) {
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ &rmn->objects.rb_root, it.rb) {
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
@@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}
/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, address, address);
- if (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- amdgpu_mn_invalidate_node(node, address, address);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
- .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
@@ -252,7 +221,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT;
+ rmn->objects = RB_ROOT_CACHED;
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 43a9d3aec6c4..2af2678ddaf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -257,15 +257,7 @@ struct amdgpu_audio {
int num_pins;
};
-struct amdgpu_mode_mc_save {
- u32 vga_render_control;
- u32 vga_hdp_control;
- bool crtc_enabled[AMDGPU_MAX_CRTCS];
-};
-
struct amdgpu_display_funcs {
- /* vga render */
- void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
/* display watermarks */
void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */
@@ -300,10 +292,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- void (*stop_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
- void (*resume_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
};
struct amdgpu_mode_info {
@@ -369,7 +357,6 @@ struct amdgpu_atom_ss {
struct amdgpu_crtc {
struct drm_crtc base;
int crtc_id;
- u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
uint32_t crtc_offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8ee69652be8c..9e495da0bb03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,55 +37,6 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-
-
-static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
-{
- if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
- return 0;
-
- return ((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
-}
-
-static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
- struct ttm_mem_reg *old_mem,
- struct ttm_mem_reg *new_mem)
-{
- u64 vis_size;
- if (!adev)
- return;
-
- if (new_mem) {
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_add(new_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_add(new_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, new_mem);
- atomic64_add(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-
- if (old_mem) {
- switch (old_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_sub(old_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_sub(old_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, old_mem);
- atomic64_sub(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-}
-
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -93,7 +44,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
+ amdgpu_bo_kunmap(bo);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
@@ -140,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ if (flags & AMDGPU_GEM_CREATE_SHADOW)
+ places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ else
+ places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
@@ -219,7 +173,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
}
/**
- * amdgpu_bo_create_kernel - create BO for kernel use
+ * amdgpu_bo_create_reserved - create reserved BO for kernel use
*
* @adev: amdgpu device object
* @size: size for the new BO
@@ -229,24 +183,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
- * Allocates and pins a BO for kernel internal use.
+ * Allocates and pins a BO for kernel internal use, and returns it still
+ * reserved.
*
* Returns 0 on success, negative error code otherwise.
*/
-int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
- unsigned long size, int align,
- u32 domain, struct amdgpu_bo **bo_ptr,
- u64 *gpu_addr, void **cpu_addr)
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
{
+ bool free = false;
int r;
- r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
- return r;
+ if (!*bo_ptr) {
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+ r);
+ return r;
+ }
+ free = true;
}
r = amdgpu_bo_reserve(*bo_ptr, false);
@@ -269,20 +229,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
}
- amdgpu_bo_unreserve(*bo_ptr);
-
return 0;
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
error_free:
- amdgpu_bo_unref(bo_ptr);
+ if (free)
+ amdgpu_bo_unref(bo_ptr);
return r;
}
/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
+ gpu_addr, cpu_addr);
+
+ if (r)
+ return r;
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+}
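The split gives callers two entry points: amdgpu_bo_create_reserved() hands the BO back still reserved (and skips allocation entirely when *bo_ptr is already non-NULL, reusing the existing BO), while amdgpu_bo_create_kernel() keeps the old behavior of unreserving before returning. A hedged sketch of the intended call pattern for the reserved variant, assuming the caller already has an adev:

	struct amdgpu_bo *bo = NULL;	/* NULL: let the helper allocate */
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;
	/* ... extra setup while the BO is still reserved ... */
	amdgpu_bo_unreserve(bo);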
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -317,12 +309,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
size_t acc_size;
int r;
@@ -351,13 +344,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
}
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
- bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+ bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU |
AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
- bo->allowed_domains = bo->prefered_domains;
+ bo->allowed_domains = bo->preferred_domains;
if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -398,8 +391,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- amdgpu_cs_report_moved_bytes(adev,
- atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+ initial_bytes_moved;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+ else
+ amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
if (unlikely(r != 0))
return r;
@@ -411,7 +410,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -426,6 +425,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
trace_amdgpu_bo_create(bo);
+ /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
+ if (type == ttm_bo_type_device)
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
return 0;
fail_unreserve:
@@ -446,19 +449,19 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow)
return 0;
- bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
- amdgpu_ttm_placement_init(adev, &placement,
- placements, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+ memset(&placements, 0, sizeof(placements));
+ amdgpu_ttm_placement_init(adev, &placement, placements,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW);
r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, &placement,
bo->tbo.resv,
+ 0,
&bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -470,39 +473,41 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/* init_value will only take effect when flags contains
+ * AMDGPU_GEM_CREATE_VRAM_CLEARED.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
- amdgpu_ttm_placement_init(adev, &placement,
- placements, domain, flags);
+ memset(&placements, 0, sizeof(placements));
+ amdgpu_ttm_placement_init(adev, &placement, placements,
+ domain, parent_flags);
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, bo_ptr);
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
+ parent_flags, sg, &placement, resv,
+ init_value, bo_ptr);
if (r)
return r;
- if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
- if (!resv) {
- r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
- WARN_ON(r != 0);
- }
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if (!resv)
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
if (!resv)
- ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);
+ reservation_object_unlock((*bo_ptr)->tbo.resv);
if (r)
amdgpu_bo_unref(bo_ptr);
@@ -535,7 +540,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
amdgpu_bo_size(bo), resv, fence,
- direct);
+ direct, false);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
@@ -551,7 +556,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- domain = bo->prefered_domains;
+ domain = bo->preferred_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
@@ -588,7 +593,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
amdgpu_bo_size(bo), resv, fence,
- direct);
+ direct, false);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
@@ -598,16 +603,16 @@ err:
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
- bool is_iomem;
+ void *kptr;
long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
- if (bo->kptr) {
- if (ptr) {
- *ptr = bo->kptr;
- }
+ kptr = amdgpu_bo_kptr(bo);
+ if (kptr) {
+ if (ptr)
+ *ptr = kptr;
return 0;
}
@@ -620,19 +625,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r)
return r;
- bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
- *ptr = bo->kptr;
+ *ptr = amdgpu_bo_kptr(bo);
return 0;
}
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+{
+ bool is_iomem;
+
+ return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
- if (bo->kptr == NULL)
- return;
- bo->kptr = NULL;
- ttm_bo_kunmap(&bo->kmap);
+ if (bo->kmap.bo)
+ ttm_bo_kunmap(&bo->kmap);
}
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
@@ -724,15 +733,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
bo->pin_count = 1;
- if (gpu_addr != NULL)
+ if (gpu_addr != NULL) {
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto error;
+ }
*gpu_addr = amdgpu_bo_gpu_offset(bo);
+ }
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -921,6 +931,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(adev, abo);
+ amdgpu_bo_kunmap(abo);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -930,8 +942,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
-
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
@@ -939,19 +949,22 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- unsigned long offset, size, lpfn;
- int i, r;
+ unsigned long offset, size;
+ int r;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
+
+ /* Remember that this BO was accessed by the CPU */
+ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- /* TODO: figure out how to map scattered VRAM to the CPU */
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
@@ -961,26 +974,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- for (i = 0; i < abo->placement.num_placement; i++) {
- /* Force into visible VRAM */
- if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn ||
- abo->placements[i].lpfn > lpfn))
- abo->placements[i].lpfn = lpfn;
- }
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+
+ /* Avoid costly evictions; only set GTT as a busy placement */
+ abo->placement.num_busy_placement = 1;
+ abo->placement.busy_placement = &abo->placements[1];
+
r = ttm_bo_validate(bo, &abo->placement, false, false);
- if (unlikely(r == -ENOMEM)) {
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &abo->placement, false, false);
- } else if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
return r;
- }
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
- if ((offset + size) > adev->mc.visible_vram_size)
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ (offset + size) > adev->mc.visible_vram_size)
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 382485115b06..a288fa6d72c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,61 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+/* bo virtual addresses in a vm */
+struct amdgpu_bo_va_mapping {
+ struct list_head list;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
+ uint64_t offset;
+ uint64_t flags;
+};
+
+/* User space allocated BO in a VM */
+struct amdgpu_bo_va {
+ struct amdgpu_vm_bo_base base;
+
+ /* protected by bo being reserved */
+ struct dma_fence *last_pt_update;
+ unsigned ref_count;
+
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+};
+
+struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ u64 flags;
+ unsigned pin_count;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+ unsigned prime_shared_count;
+ /* list of all virtual address to which this bo is associated to */
+ struct list_head va;
+ /* Constant after initialization */
+ struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ struct amdgpu_mn *mn;
+
+ union {
+ struct list_head mn_list;
+ struct list_head shadow_list;
+ };
+};
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -120,7 +175,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
*/
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
- return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+ switch (bo->tbo.mem.mem_type) {
+ case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+ case TTM_PL_VRAM: return true;
+ default: return false;
+ }
}
int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -128,6 +187,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
@@ -135,7 +195,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
@@ -143,6 +208,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index c19c4d138751..f21a7716b90e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,7 @@ struct cg_flag_name
const char *name;
};
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 6bdc866570ab..5b3f92891f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4083be61b328..8c2204c7b384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -63,8 +63,13 @@ static int psp_sw_init(void *handle)
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
break;
case CHIP_RAVEN:
+#if 0
+ psp->init_microcode = psp_v10_0_init_microcode;
+#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
+ psp->ring_create = psp_v10_0_ring_create;
+ psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
break;
@@ -95,9 +100,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
int i;
struct amdgpu_device *adev = psp->adev;
- val = RREG32(reg_index);
-
for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32(reg_index);
if (check_changed) {
if (val != reg_val)
return 0;
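The fix moves the RREG32() read inside the loop; the old code read the register once before the loop and then compared the same stale value on every iteration. A standalone model of the corrected polling loop:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;
static uint32_t rreg32(void) { return fake_reg; }

static int wait_for(uint32_t want, uint32_t mask, int timeout)
{
	for (int i = 0; i < timeout; i++) {
		uint32_t val = rreg32();	/* re-read on every pass */
		if ((val & mask) == want)
			return 0;
		fake_reg++;	/* stand-in for hardware making progress */
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("wait_for -> %d\n", wait_for(5, 0xff, 100));
	return 0;	/* prints 0: the value is seen on the sixth pass */
}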
@@ -118,33 +122,18 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index)
{
int ret;
- struct amdgpu_bo *cmd_buf_bo;
- uint64_t cmd_buf_mc_addr;
- struct psp_gfx_cmd_resp *cmd_buf_mem;
- struct amdgpu_device *adev = psp->adev;
-
- ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &cmd_buf_bo, &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
- if (ret)
- return ret;
- memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
+ memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
- memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
- ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr,
+ ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
fence_mc_addr, index);
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
}
- amdgpu_bo_free_kernel(&cmd_buf_bo,
- &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
-
return ret;
}
@@ -352,13 +341,20 @@ static int psp_load_fw(struct amdgpu_device *adev)
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
+ goto failed_mem2;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+ if (ret)
goto failed_mem1;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret)
- goto failed_mem1;
+ goto failed_mem;
ret = psp_tmr_init(psp);
if (ret)
@@ -379,9 +375,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
return 0;
failed_mem:
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
+ &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+failed_mem1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem1:
+failed_mem2:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
@@ -435,16 +435,15 @@ static int psp_hw_fini(void *handle)
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- if (psp->tmr_buf)
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
-
- if (psp->fw_pri_buf)
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-
- if (psp->fence_buf_bo)
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
+ &psp->asd_shared_buf);
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
kfree(psp->cmd);
psp->cmd = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 1a1c8b469f93..538fa9dbfb21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -108,6 +108,11 @@ struct psp_context
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
void *fence_buf;
+
+ /* cmd buffer */
+ struct amdgpu_bo *cmd_buf_bo;
+ uint64_t cmd_buf_mc_addr;
+ struct psp_gfx_cmd_resp *cmd_buf_mem;
};
struct amdgpu_psp_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 75165e07b1cd..5ce65280b396 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned irq_type)
{
int r;
+ int sched_hw_submission = amdgpu_sched_hw_submission;
+
+ /* Raise the hw submission limit for KIQ: it services a number of
+ * gfx/compute tasks from both KFD and KGD that may have outstanding
+ * fences, and it doesn't really use the gpu scheduler anyway;
+ * KIQ tasks are submitted directly to the ring.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ sched_hw_submission = max(sched_hw_submission, 256);
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
@@ -178,38 +188,21 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring,
- amdgpu_sched_hw_submission);
+ r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
if (r)
return r;
}
- if (ring->funcs->support_64bit_ptrs) {
- r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
-
- } else {
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
+ r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
}
r = amdgpu_wb_get(adev, &ring->fence_offs);
@@ -234,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- ring->ring_size = roundup_pow_of_two(max_dw * 4 *
- amdgpu_sched_hw_submission);
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
@@ -277,18 +269,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
ring->ready = false;
- if (ring->funcs->support_64bit_ptrs) {
- amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->fence_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
- } else {
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
- }
+ /* Don't tear down a ring that was never initialized */
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
+ return;
+
+ amdgpu_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bc8dec992f73..322d25299a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -212,4 +212,44 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
}
+static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
+{
+ if (ring->count_dw <= 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+ ring->ring[ring->wptr++ & ring->buf_mask] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+}
+
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
+ void *src, int count_dw)
+{
+ unsigned occupied, chunk1, chunk2;
+ void *dst;
+
+ if (unlikely(ring->count_dw < count_dw))
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
+ occupied = ring->wptr & ring->buf_mask;
+ dst = (void *)&ring->ring[occupied];
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
+ chunk2 = count_dw - chunk1;
+ chunk1 <<= 2;
+ chunk2 <<= 2;
+
+ if (chunk1)
+ memcpy(dst, src, chunk1);
+
+ if (chunk2) {
+ src += chunk1;
+ dst = (void *)ring->ring;
+ memcpy(dst, src, chunk2);
+ }
+
+ ring->wptr += count_dw;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count_dw;
+}
+
#endif
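amdgpu_ring_write_multiple() relies on the ring size being a power of two: the copy is split into the chunk up to the end of the buffer and the wrapped remainder. (In the driver, wptr is additionally masked with ptr_mask, which differs from buf_mask for rings with 64-bit pointers.) A standalone model of the wrap-around:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define RING_DW 8	/* ring size in dwords, a power of two as in the driver */

static uint32_t ring[RING_DW];
static uint32_t wptr;

static void write_multiple(const uint32_t *src, unsigned count_dw)
{
	unsigned buf_mask = RING_DW - 1;
	unsigned occupied = wptr & buf_mask;
	unsigned chunk1 = RING_DW - occupied;

	if (chunk1 > count_dw)
		chunk1 = count_dw;

	/* chunk up to the end of the buffer, then the wrapped remainder */
	memcpy(&ring[occupied], src, chunk1 * sizeof(*src));
	if (count_dw > chunk1)
		memcpy(ring, src + chunk1, (count_dw - chunk1) * sizeof(*src));

	wptr = (wptr + count_dw) & buf_mask;
}

int main(void)
{
	uint32_t pkt[6] = { 1, 2, 3, 4, 5, 6 };

	wptr = 5;	/* force a wrap after three dwords */
	write_multiple(pkt, 6);
	for (int i = 0; i < RING_DW; i++)
		printf("ring[%d] = %u\n", i, ring[i]);
	return 0;	/* prints 4 5 6 0 0 1 2 3 */
}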
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 5ca75a456ad2..3144400435b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create(adev, size, align, true, domain,
- 0, NULL, NULL, &sa_manager->bo);
+ 0, NULL, NULL, 0, &sa_manager->bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 15510dadde01..ed8c3739015b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *vram_obj = NULL;
struct amdgpu_bo **gtt_obj = NULL;
- uint64_t gtt_addr, vram_addr;
+ uint64_t gart_addr, vram_addr;
unsigned n, size;
int i, r;
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
+ NULL, NULL, 0, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -76,13 +76,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
- void **gtt_start, **gtt_end;
+ void **gart_start, **gart_end;
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
+ NULL, 0, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
@@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_lclean_unres;
@@ -103,15 +103,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size;
- gtt_start < gtt_end;
- gtt_start++)
- *gtt_start = gtt_start;
+ for (gart_start = gtt_map, gart_end = gtt_map + size;
+ gart_start < gart_end;
+ gart_start++)
+ *gart_start = gart_start;
amdgpu_bo_kunmap(gtt_obj[i]);
- r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence, false);
+ r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
+ size, NULL, &fence, false, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ for (gart_start = gtt_map, gart_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
vram_start < vram_end;
- gtt_start++, vram_start++) {
- if (*vram_start != gtt_start) {
+ gart_start++, vram_start++) {
+ if (*vram_start != gart_start) {
DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
"expected 0x%p (GTT/VRAM offset "
"0x%16llx/0x%16llx)\n",
- i, *vram_start, gtt_start,
+ i, *vram_start, gart_start,
(unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
- (void*)gtt_start - gtt_map),
+ (gart_addr - adev->mc.gart_start +
+ (void*)gart_start - gtt_map),
(unsigned long long)
(vram_addr - adev->mc.vram_start +
- (void*)gtt_start - gtt_map));
+ (void*)gart_start - gtt_map));
amdgpu_bo_kunmap(vram_obj);
goto out_lclean_unpin;
}
@@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
- r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence, false);
+ r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
+ size, NULL, &fence, false, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
@@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ for (gart_start = gtt_map, gart_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
- gtt_start < gtt_end;
- gtt_start++, vram_start++) {
- if (*gtt_start != vram_start) {
+ gart_start < gart_end;
+ gart_start++, vram_start++) {
+ if (*gart_start != vram_start) {
DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
"expected 0x%p (VRAM/GTT offset "
"0x%16llx/0x%16llx)\n",
- i, *gtt_start, vram_start,
+ i, *gart_start, vram_start,
(unsigned long long)
(vram_addr - adev->mc.vram_start +
(void*)vram_start - vram_map),
(unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
+ (gart_addr - adev->mc.gart_start +
(void*)vram_start - vram_map));
amdgpu_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin;
@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - adev->mc.gtt_start);
+ gart_addr - adev->mc.gart_start);
continue;
out_lclean_unpin:
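
Throughout this series amdgpu_copy_buffer() grows a trailing bool, vm_needs_flush, which the TTM move path sets when the copy depends on freshly written GART entries (the prototype change is in the amdgpu_ttm.h hunk below). A call-site sketch under the new prototype; the variable names here are illustrative, not from the patch:

	r = amdgpu_copy_buffer(ring, src_addr, dst_addr, num_bytes,
			       resv, &fence,
			       false,	/* direct_submit: go through the scheduler */
			       true);	/* vm_needs_flush: flush the VM before copying */
	if (r)
		goto error;
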
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 8601904e670a..1c88bd5e29ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -14,6 +14,62 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
+TRACE_EVENT(amdgpu_ttm_tt_populate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
+
+TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
+
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -105,12 +161,12 @@ TRACE_EVENT(amdgpu_bo_create,
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
__entry->type = bo->tbo.mem.mem_type;
- __entry->prefer = bo->prefered_domains;
+ __entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
+ TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
__entry->bo, __entry->pages, __entry->type,
__entry->prefer, __entry->allow, __entry->visible)
);
@@ -224,17 +280,17 @@ TRACE_EVENT(amdgpu_vm_bo_map,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va ? bo_va->bo : NULL;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -248,17 +304,17 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va->bo;
+ __entry->bo = bo_va->base.bo;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -269,7 +325,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
TP_STRUCT__entry(
__field(u64, soffset)
__field(u64, eoffset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
@@ -277,7 +333,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
__entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
- TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
__entry->soffset, __entry->eoffset, __entry->flags)
);
@@ -293,14 +349,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags),
+ uint32_t incr, uint64_t flags),
TP_ARGS(pe, addr, count, incr, flags),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
@@ -310,7 +366,7 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->incr = incr;
__entry->flags = flags;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
__entry->pe, __entry->addr, __entry->incr,
__entry->flags, __entry->count)
);
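
The two new events above, amdgpu_ttm_tt_populate and amdgpu_ttm_tt_unpopulate, are textually identical apart from their names; the usual tracepoint idiom for that is a shared event class. A sketch of the equivalent definition (the class name amdgpu_ttm_tt_page is made up here, everything else is copied from the events above). Separately, the u32 -> u64 widening of the flags fields in the later hunks keeps these tracepoints from truncating the 64-bit PTE flags the VM code now passes around.

	DECLARE_EVENT_CLASS(amdgpu_ttm_tt_page,
		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
			 uint64_t phys_address),
		TP_ARGS(adev, dma_address, phys_address),
		TP_STRUCT__entry(
			__field(uint16_t, domain)
			__field(uint8_t, bus)
			__field(uint8_t, slot)
			__field(uint8_t, func)
			__field(uint64_t, dma)
			__field(uint64_t, phys)
		),
		TP_fast_assign(
			__entry->domain = pci_domain_nr(adev->pdev->bus);
			__entry->bus = adev->pdev->bus->number;
			__entry->slot = PCI_SLOT(adev->pdev->devfn);
			__entry->func = PCI_FUNC(adev->pdev->devfn);
			__entry->dma = dma_address;
			__entry->phys = phys_address;
		),
		TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
			  (unsigned)__entry->domain, (unsigned)__entry->bus,
			  (unsigned)__entry->slot, (unsigned)__entry->func,
			  (unsigned long long)__entry->dma,
			  (unsigned long long)__entry->phys)
	);

	DEFINE_EVENT(amdgpu_ttm_tt_page, amdgpu_ttm_tt_populate,
		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
			 uint64_t phys_address),
		TP_ARGS(adev, dma_address, phys_address));

	DEFINE_EVENT(amdgpu_ttm_tt_page, amdgpu_ttm_tt_unpopulate,
		TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address,
			 uint64_t phys_address),
		TP_ARGS(adev, dma_address, phys_address));
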
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c9b131b13ef7..7ef6c28a34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,14 +43,20 @@
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr);
+
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
/*
* Global memory.
*/
@@ -97,6 +103,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
goto error_bo;
}
+ mutex_init(&adev->mman.gtt_window_lock);
+
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
@@ -123,6 +131,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
if (adev->mman.mem_global_referenced) {
amd_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
+ mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
adev->mman.mem_global_referenced = false;
@@ -150,7 +159,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_TT:
man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->mc.gtt_start;
+ man->gpu_offset = adev->mc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -186,12 +195,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
- unsigned i;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
@@ -207,22 +215,36 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
adev->mman.buffer_funcs_ring &&
adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ struct drm_mm_node *node = bo->mem.mm_node;
+ unsigned long pages_left;
+
+ for (pages_left = bo->mem.num_pages;
+ pages_left;
+ pages_left -= node->size, node++) {
+ if (node->start < fpfn)
+ break;
+ }
+
+ if (!pages_left)
+ goto gtt;
+
+ /* Try evicting to the CPU-inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM.
+ */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+ abo->placements[0].fpfn = fpfn;
+ abo->placements[0].lpfn = 0;
+ abo->placement.busy_placement = &abo->placements[1];
+ abo->placement.num_busy_placement = 1;
} else {
+gtt:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- for (i = 0; i < abo->placement.num_placement; ++i) {
- if (!(abo->placements[i].flags &
- TTM_PL_FLAG_TT))
- continue;
-
- if (abo->placements[i].lpfn)
- continue;
-
- /* set an upper limit to force directly
- * allocating address space for the BO.
- */
- abo->placements[i].lpfn =
- adev->mc.gtt_size >> PAGE_SHIFT;
- }
}
break;
case TTM_PL_TT:
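
The new eviction branch above scans the BO's drm_mm nodes: if every node already lies at or above fpfn (the first CPU-invisible page frame), the BO gains nothing from the special VRAM|GTT placement and falls through to plain GTT eviction. A standalone toy of that scan, with the node layout and numbers assumed:

	#include <stdbool.h>
	#include <stdio.h>

	struct node { unsigned long start, size; };	/* stand-in for drm_mm_node */

	/* Mirrors the scan: any node that starts below fpfn still has
	 * CPU-visible pages, so evicting deeper into VRAM is worth trying. */
	static bool bo_touches_visible_vram(const struct node *node,
					    unsigned long num_pages,
					    unsigned long fpfn)
	{
		unsigned long pages_left;

		for (pages_left = num_pages; pages_left;
		     pages_left -= node->size, node++)
			if (node->start < fpfn)
				return true;
		return false;
	}

	int main(void)
	{
		/* 256MB visible VRAM => fpfn = (256 << 20) >> 12 = 65536 */
		struct node bo[] = { { 70000, 128 }, { 50000, 64 } };

		printf("%s\n", bo_touches_visible_vram(bo, 128 + 64, 65536) ?
		       "try invisible VRAM first" : "evict straight to GTT");
		return 0;
	}
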
@@ -252,29 +274,18 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem,
- uint64_t *addr)
+static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem)
{
- int r;
-
- switch (mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, mem);
- if (r)
- return r;
+ uint64_t addr = 0;
- case TTM_PL_VRAM:
- *addr = mm_node->start << PAGE_SHIFT;
- *addr += bo->bdev->man[mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", mem->mem_type);
- return -EINVAL;
+ if (mem->mem_type != TTM_PL_TT ||
+ amdgpu_gtt_mgr_is_allocated(mem)) {
+ addr = mm_node->start << PAGE_SHIFT;
+ addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
-
- return 0;
+ return addr;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
@@ -299,26 +310,40 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
}
old_mm = old_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
- if (r)
- return r;
old_size = old_mm->size;
-
+ old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
new_mm = new_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
- if (r)
- return r;
new_size = new_mm->size;
+ new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
num_pages = new_mem->num_pages;
+ mutex_lock(&adev->mman.gtt_window_lock);
while (num_pages) {
- unsigned long cur_pages = min(old_size, new_size);
+ unsigned long cur_pages = min(min(old_size, new_size),
+ (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
+ uint64_t from = old_start, to = new_start;
struct dma_fence *next;
- r = amdgpu_copy_buffer(ring, old_start, new_start,
+ if (old_mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(old_mem)) {
+ r = amdgpu_map_buffer(bo, old_mem, cur_pages,
+ old_start, 0, ring, &from);
+ if (r)
+ goto error;
+ }
+
+ if (new_mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(new_mem)) {
+ r = amdgpu_map_buffer(bo, new_mem, cur_pages,
+ new_start, 1, ring, &to);
+ if (r)
+ goto error;
+ }
+
+ r = amdgpu_copy_buffer(ring, from, to,
cur_pages * PAGE_SIZE,
- bo->resv, &next, false);
+ bo->resv, &next, false, true);
if (r)
goto error;
@@ -331,10 +356,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
old_size -= cur_pages;
if (!old_size) {
- r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
- &old_start);
- if (r)
- goto error;
+ old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
old_size = old_mm->size;
} else {
old_start += cur_pages * PAGE_SIZE;
@@ -342,22 +364,21 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
new_size -= cur_pages;
if (!new_size) {
- r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
- &new_start);
- if (r)
- goto error;
-
+ new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
new_size = new_mm->size;
} else {
new_start += cur_pages * PAGE_SIZE;
}
}
+ mutex_unlock(&adev->mman.gtt_window_lock);
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
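
The reworked blit path no longer requires GTT BOs to have committed GART space before copying; instead amdgpu_map_buffer() (added near the end of this file's diff) maps up to AMDGPU_GTT_MAX_TRANSFER_SIZE pages into one of two fixed windows at the start of the GART, one per copy direction, all under the new gtt_window_lock. A standalone sketch of the window address arithmetic, with gart_start assumed zero:

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512	/* pages per window (amdgpu_ttm.h below) */
	#define AMDGPU_GPU_PAGE_SIZE 4096

	/* Same arithmetic as in amdgpu_map_buffer(): window N starts
	 * N * 512 GPU pages into the GART aperture. */
	static uint64_t window_addr(uint64_t gart_start, unsigned window)
	{
		return gart_start + (uint64_t)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		       AMDGPU_GPU_PAGE_SIZE;
	}

	int main(void)
	{
		uint64_t gart_start = 0;	/* assumed; really adev->mc.gart_start */

		printf("window 0 (src): 0x%llx\n",
		       (unsigned long long)window_addr(gart_start, 0));
		printf("window 1 (dst): 0x%llx\n",
		       (unsigned long long)window_addr(gart_start, 1));
		return 0;
	}
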
@@ -384,7 +405,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -431,7 +452,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -507,6 +528,15 @@ memcpy:
}
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
@@ -633,6 +663,38 @@ release_pages:
return r;
}
+static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_populate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
+static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_unpopulate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
@@ -659,6 +721,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
+ amdgpu_trace_dma_map(ttm);
+
return 0;
release_sg:
@@ -692,6 +756,8 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
put_page(page);
}
+ amdgpu_trace_dma_unmap(ttm);
+
sg_free_table(ttm->sg);
}
@@ -699,7 +765,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- int r;
+ uint64_t flags;
+ int r = 0;
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(ttm);
@@ -718,7 +785,25 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- return 0;
+ if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+ return 0;
+
+ spin_lock(&gtt->adev->gtt_list_lock);
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
+ goto error_gart_bind;
+ }
+
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+ spin_unlock(&gtt->adev->gtt_list_lock);
+ return r;
}
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -730,36 +815,38 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- uint64_t flags;
+ struct ttm_mem_reg tmp;
+
+ struct ttm_placement placement;
+ struct ttm_place placements;
int r;
if (!ttm || amdgpu_ttm_is_bound(ttm))
return 0;
- r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
- NULL, bo_mem);
- if (r) {
- DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
- return r;
- }
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- spin_lock(&gtt->adev->gtt_list_lock);
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
- gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+ if (unlikely(r))
+ return r;
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
+ r = ttm_bo_move_ttm(bo, true, false, &tmp);
+ if (unlikely(r))
+ ttm_bo_mem_put(bo, &tmp);
+ else
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
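
amdgpu_ttm_bind() now allocates real GART address space on demand (ttm_bo_mem_space() with lpfn capped at gart_size) and lets ttm_bo_move_ttm() trigger the actual bind through the backend; on success the GPU-visible offset is recomputed from the new placement. A standalone sketch of that offset math, with the values assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* assumed values: mem_start comes back from ttm_bo_mem_space(),
		 * gpu_offset is man[TTM_PL_TT].gpu_offset == adev->mc.gart_start */
		unsigned long mem_start = 0x1234;	/* pages into the GART */
		uint64_t gpu_offset = 0x0000008000000000ULL;

		uint64_t bo_offset = ((uint64_t)mem_start << PAGE_SHIFT) + gpu_offset;

		printf("bo->offset = 0x%llx\n", (unsigned long long)bo_offset);
		return 0;
	}
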
@@ -852,7 +939,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
@@ -875,14 +962,14 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- return 0;
+ r = 0;
+ goto trace_mappings;
}
- adev = amdgpu_ttm_adev(ttm->bdev);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ r = ttm_dma_populate(&gtt->ttm, adev->dev);
+ goto trace_mappings;
}
#endif
@@ -905,7 +992,12 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return -EFAULT;
}
}
- return 0;
+
+ r = 0;
+trace_mappings:
+ if (likely(!r))
+ amdgpu_trace_dma_map(ttm);
+ return r;
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -926,6 +1018,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
+ amdgpu_trace_dma_unmap(ttm);
+
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1075,6 +1169,67 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return ttm_bo_eviction_valuable(bo, place);
}
+static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+{
+ struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+ uint32_t value = 0;
+ int ret = 0;
+ uint64_t pos;
+ unsigned long flags;
+
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return -EIO;
+
+ while (offset >= (nodes->size << PAGE_SHIFT)) {
+ offset -= nodes->size << PAGE_SHIFT;
+ ++nodes;
+ }
+ pos = (nodes->start << PAGE_SHIFT) + offset;
+
+ while (len && pos < adev->mc.mc_vram_size) {
+ uint64_t aligned_pos = pos & ~(uint64_t)3;
+ uint32_t bytes = 4 - (pos & 3);
+ uint32_t shift = (pos & 3) * 8;
+ uint32_t mask = 0xffffffff << shift;
+
+ if (len < bytes) {
+ mask &= 0xffffffff >> (bytes - len) * 8;
+ bytes = len;
+ }
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+
+ ret += bytes;
+ buf = (uint8_t *)buf + bytes;
+ pos += bytes;
+ len -= bytes;
+ if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
+ ++nodes;
+ pos = (nodes->start << PAGE_SHIFT);
+ }
+ }
+
+ return ret;
+}
+
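
The new access_memory callback (presumably wired up to ptrace-style access through TTM's vm access path) goes through the MM_INDEX/MM_DATA indirect register pair one dword at a time; unaligned head and tail bytes are merged read-modify-write with a shifted mask. A standalone toy of that masking arithmetic, with a plain array standing in for the MMIO window:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the MM_INDEX/MM_DATA dword window. */
	static uint32_t vram[2] = { 0x11223344, 0x55667788 };

	static void access_bytes(uint64_t pos, void *buf, int len, int write)
	{
		while (len) {
			uint64_t aligned_pos = pos & ~(uint64_t)3;
			uint32_t bytes = 4 - (pos & 3);
			uint32_t shift = (pos & 3) * 8;
			uint32_t mask = 0xffffffff << shift;
			uint32_t value;

			if (len < (int)bytes) {
				mask &= 0xffffffff >> (bytes - len) * 8;
				bytes = len;
			}

			value = vram[aligned_pos / 4];		/* RREG32(mmMM_DATA) */
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				vram[aligned_pos / 4] = value;	/* WREG32(mmMM_DATA) */
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}

			buf = (uint8_t *)buf + bytes;
			pos += bytes;
			len -= bytes;
		}
	}

	int main(void)
	{
		uint32_t v = 0;

		access_bytes(1, &v, 2, 0);	/* read two bytes at offset 1 */
		printf("0x%04x\n", v);		/* 0x2233 on little endian */
		return 0;
	}
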
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -1090,11 +1245,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
+ .access_memory = &amdgpu_ttm_access_memory
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
+ uint64_t gtt_size;
int r;
+ u64 vis_vram_limit;
r = amdgpu_ttm_global_init(adev);
if (r) {
@@ -1118,36 +1276,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+
+ /* Reduce size of CPU-visible VRAM if requested */
+ vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
+ if (amdgpu_vis_vram_limit > 0 &&
+ vis_vram_limit <= adev->mc.visible_vram_size)
+ adev->mc.visible_vram_size = vis_vram_limit;
+
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
- r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->stollen_vga_memory);
- if (r) {
- return r;
- }
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+ r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->stolen_vga_memory,
+ NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
- if (r) {
- amdgpu_bo_unref(&adev->stollen_vga_memory);
- return r;
- }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
- adev->mc.gtt_size >> PAGE_SHIFT);
+
+ if (amdgpu_gtt_size == -1)
+ gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
+ else
+ gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
- (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+ (unsigned)(gtt_size / (1024 * 1024)));
adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
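
GTT sizing no longer comes from adev->mc.gtt_size: the new amdgpu_gtt_size module parameter wins when set, otherwise the default is the larger of a fixed default and VRAM size, presumably so that any VRAM BO can always be evicted to GTT. A standalone sketch of the rule, with the default value assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL	/* assumed default */

	/* amdgpu_gtt_size is in MB, -1 means "auto". */
	static uint64_t pick_gtt_size(long long amdgpu_gtt_size, uint64_t vram_size)
	{
		uint64_t def = AMDGPU_DEFAULT_GTT_SIZE_MB << 20;

		if (amdgpu_gtt_size == -1)
			return def > vram_size ? def : vram_size;
		return (uint64_t)amdgpu_gtt_size << 20;
	}

	int main(void)
	{
		printf("%llu MB\n", (unsigned long long)
		       (pick_gtt_size(-1, 8ULL << 30) >> 20));	/* 8192: VRAM wins */
		return 0;
	}
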
@@ -1203,13 +1362,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
+ if (adev->stolen_vga_memory) {
+ r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
if (r == 0) {
- amdgpu_bo_unpin(adev->stollen_vga_memory);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
+ amdgpu_bo_unpin(adev->stolen_vga_memory);
+ amdgpu_bo_unreserve(adev->stolen_vga_memory);
}
- amdgpu_bo_unref(&adev->stollen_vga_memory);
+ amdgpu_bo_unref(&adev->stolen_vga_memory);
}
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
@@ -1256,12 +1415,77 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_device *adev = ring->adev;
+ struct ttm_tt *ttm = bo->ttm;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ dma_addr_t *dma_address;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ uint64_t flags;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ *addr = adev->mc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+
+ num_dw = adev->mman.buffer_funcs->copy_num_dw;
+ while (num_dw & 0x7)
+ num_dw++;
+
+ num_bytes = num_pages * 8;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = adev->gart.table_addr;
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ &job->ibs[0].ptr[num_dw]);
+ if (r)
+ goto error_free;
+
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1283,6 +1507,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (r)
return r;
+ job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
@@ -1327,11 +1552,12 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1347,6 +1573,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (r)
+ return r;
+ }
+
num_pages = bo->tbo.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
@@ -1357,7 +1589,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size;
++mm_node;
}
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* 10 double words for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * 10;
/* for IB padding */
num_dw += 64;
@@ -1382,16 +1616,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
- r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
- &bo->tbo.mem, &dst_addr);
- if (r)
- return r;
+ WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
+ dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
+ dst_addr, 0,
+ cur_size_in_bytes >> 3, 0,
+ src_data);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
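
amdgpu_fill_buffer() now emits SDMA PTE-PDE packets instead of CONST_FILL, which is what allows the 64-bit fill pattern in the new prototype; the IB is sized at 10 dwords per packet plus padding, and each drm_mm node is split into max_bytes chunks. A standalone sketch of the sizing math, with max_bytes and the node layout assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint64_t node_pages[] = { 300, 212 };	/* assumed BO layout */
		uint32_t max_bytes = 0x1fffff;		/* assumed fill_max_bytes */
		unsigned num_loops = 0, num_dw;
		size_t i;

		for (i = 0; i < sizeof(node_pages) / sizeof(node_pages[0]); i++) {
			uint64_t byte_count = node_pages[i] << PAGE_SHIFT;

			/* DIV_ROUND_UP(byte_count, max_bytes) */
			num_loops += (byte_count + max_bytes - 1) / max_bytes;
		}
		num_dw = num_loops * 10 + 64;	/* 10 dw per SDMA_OP_PTEPDE + padding */
		printf("loops=%u, ib dwords=%u\n", num_loops, num_dw);
		return 0;
	}
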
@@ -1417,32 +1651,16 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
- *man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
- switch (ttm_pl) {
- case TTM_PL_VRAM:
- seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- adev->mman.bdev.man[ttm_pl].size,
- (u64)atomic64_read(&adev->vram_usage) >> 20,
- (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- break;
- case TTM_PL_TT:
- amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
- break;
- }
+ man->func->debug(man, &p);
return 0;
}
@@ -1574,7 +1792,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gtt_size);
+ i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.gtt = ent;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6bdede8ff12b..43093bffa2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,6 +34,9 @@
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
+#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
+#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
+
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@@ -49,6 +52,8 @@ struct amdgpu_mman {
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
+
+ struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
};
@@ -56,24 +61,25 @@ struct amdgpu_mman {
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4f50eeb65855..36c763310df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
-#if 0
- if (!load_type)
+ if (load_type != 2)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
-#else
- return AMDGPU_FW_LOAD_DIRECT;
-#endif
default:
DRM_ERROR("Unknow firmware load type\n");
}
@@ -362,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
- ucode->ucode_size += le32_to_cpu(header->jt_size) * 4;
-
return 0;
}
@@ -377,10 +371,15 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
struct amdgpu_firmware_info *ucode = NULL;
const struct common_firmware_header *header = NULL;
+ if (!adev->firmware.fw_size) {
+ dev_warn(adev->dev, "No IP firmware needs to be loaded\n");
+ return 0;
+ }
+
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo);
+ NULL, NULL, 0, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -459,6 +458,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
int i;
struct amdgpu_firmware_info *ucode = NULL;
+ if (!adev->firmware.fw_size)
+ return 0;
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2ca09f111f08..e19928dae8e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -588,6 +588,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
}
break;
+ case 8: /* MJPEG */
+ min_dpb_size = 0;
+ break;
+
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
@@ -1051,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -1101,7 +1105,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b692ad402252..c855366521ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r, timeout = adev->usec_timeout;
- /* workaround VCE ring test slow issue for sriov*/
+ /* skip the ring test for SRIOV */
if (amdgpu_sriov_vf(adev))
- timeout *= 10;
+ return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 09190fadd228..041e0121590c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (fences == 0) {
if (adev->pm.dpm_enabled) {
+ /* might be needed again once PG/CG support lands:
amdgpu_dpm_enable_uvd(adev, false);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ */
}
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
@@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- if (adev->pm.dpm_enabled) {
- amdgpu_dpm_enable_uvd(adev, true);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- }
+ if (set_clocks && adev->pm.dpm_enabled) {
+ /* might be needed again once PG/CG support lands:
+ amdgpu_dpm_enable_uvd(adev, true);
+ */
}
}
@@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
new file mode 100644
index 000000000000..45ac91861965
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_vf_error.h"
+#include "mxgpu_ai.h"
+
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct amdgpu_vf_error_buffer - ring buffer of amdgpu VF error records. */
+struct amdgpu_vf_error_buffer {
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
+struct amdgpu_vf_error_buffer admgpu_vf_errors;
+
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+{
+ int index;
+ uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ admgpu_vf_errors.code[index] = error_code;
+ admgpu_vf_errors.flags[index] = error_flags;
+ admgpu_vf_errors.data[index] = error_data;
+ admgpu_vf_errors.write_count++;
+}
+
+
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
+{
+ /* u32 pf2vf_flags = 0; */
+ u32 data1, data2, data3;
+ int index;
+
+ if (!adev || !amdgpu_sriov_vf(adev) || !adev->virt.ops ||
+ !adev->virt.ops->trans_msg) {
+ return;
+ }
+/*
TODO: enable this code once pf2vf_info is merged
+ AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags);
+ if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) {
+ return;
+ }
+*/
+ /* The entries overwrite each other in the ring; on overflow, advance
+ * read_count so only the newest AMDGPU_VF_ERROR_ENTRY_SIZE entries are sent. */
+ if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ }
+
+ while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
+ index = admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
+ data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+
+ adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
+ admgpu_vf_errors.read_count++;
+ }
+}
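
The error log is a fixed 16-entry ring indexed by free-running counters; amdgpu_vf_error_trans_all() clamps read_count on overflow so only the newest entries reach the host, each transmitted as a packed code/flags word plus the 64-bit payload split across two mailbox words. A standalone sketch of the clamping:

	#include <stdio.h>

	#define AMDGPU_VF_ERROR_ENTRY_SIZE 16

	int main(void)
	{
		/* 20 errors logged, none read yet: the four oldest are lost and
		 * transmission starts at ring index 20 - 16 = 4. */
		int write_count = 20, read_count = 0;

		if (write_count - read_count > AMDGPU_VF_ERROR_ENTRY_SIZE)
			read_count = write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;

		printf("first index sent: %d\n",
		       read_count % AMDGPU_VF_ERROR_ENTRY_SIZE);
		printf("entries sent: %d\n", write_count - read_count);
		return 0;
	}
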
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
new file mode 100644
index 000000000000..2a3278ec76ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VF_ERROR_H__
+#define __VF_ERROR_H__
+
+#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c, f) ((((c) & 0xFFFF) << 16) | ((f) & 0xFFFF))
+#define AMDGIM_ERROR_CODE(t, c) ((((t) & 0xF) << 12) | ((c) & 0xFFF))
+
+/* Please keep this enum in sync with the AMD GIM driver */
+enum AMDGIM_ERROR_VF {
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0,
+ AMDGIM_ERROR_VF_NO_VBIOS,
+ AMDGIM_ERROR_VF_GPU_POST_ERROR,
+ AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL,
+ AMDGIM_ERROR_VF_FENCE_INIT_FAIL,
+
+ AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL,
+ AMDGIM_ERROR_VF_IB_INIT_FAIL,
+ AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL,
+ AMDGIM_ERROR_VF_ASIC_RESUME_FAIL,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL,
+
+ AMDGIM_ERROR_VF_TEST,
+ AMDGIM_ERROR_VF_MAX
+};
+
+enum AMDGIM_ERROR_CATEGORY {
+ AMDGIM_ERROR_CATEGORY_NON_USED = 0,
+ AMDGIM_ERROR_CATEGORY_GIM,
+ AMDGIM_ERROR_CATEGORY_PF,
+ AMDGIM_ERROR_CATEGORY_VF,
+ AMDGIM_ERROR_CATEGORY_VBIOS,
+ AMDGIM_ERROR_CATEGORY_MONITOR,
+
+ AMDGIM_ERROR_CATEGORY_MAX
+};
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev);
+
+#endif /* __VF_ERROR_H__ */
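
A worked example of the packing macros (written here with the fully parenthesized arguments, as fixed above): AMDGIM_ERROR_CODE() puts the 4-bit category in bits 15:12 of the code, and AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX() pairs code and flags in one 32-bit mailbox word:

	#include <stdint.h>
	#include <stdio.h>

	#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c, f) ((((c) & 0xFFFF) << 16) | ((f) & 0xFFFF))
	#define AMDGIM_ERROR_CODE(t, c) ((((t) & 0xF) << 12) | ((c) & 0xFFF))

	int main(void)
	{
		/* category 3 (AMDGIM_ERROR_CATEGORY_VF), sub-code 2
		 * (AMDGIM_ERROR_VF_GPU_POST_ERROR) */
		uint16_t code = AMDGIM_ERROR_CODE(3, 2);
		uint32_t data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(code, 0x0001);

		printf("code  = 0x%04x\n", code);	/* 0x3002 */
		printf("data1 = 0x%08x\n", data1);	/* 0x30020001 */
		return 0;
	}
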
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 8a081e162d13..ab05121b9272 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
* address within META_DATA init package to support SRIOV gfx preemption.
*/
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va)
{
- int r;
- struct amdgpu_bo_va *bo_va;
struct ww_acquire_ctx ticket;
struct list_head list;
struct amdgpu_bo_list_entry pd;
struct ttm_validate_buffer csa_tv;
+ int r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return r;
}
- bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!bo_va) {
+ *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+ if (!*bo_va) {
ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("failed to create bo_va for static CSA\n");
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
- AMDGPU_CSA_SIZE);
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- vm->csa_bo_va = bo_va;
ttm_eu_backoff_reservation(&ticket, &list);
return 0;
}
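
With vm->csa_bo_va gone, the caller owns the mapping handle. A hypothetical call site under the new prototype (fpriv here is illustrative, not from the patch):

	struct amdgpu_bo_va *csa_va = NULL;

	r = amdgpu_map_static_csa(adev, &fpriv->vm, &csa_va);
	if (r)
		return r;
	/* keep csa_va in the caller's private data; tear it down later
	 * with amdgpu_vm_bo_rmv() */
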
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 9e1062edb76e..afcfb8bcfb65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -43,6 +43,7 @@ struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
int (*reset_gpu)(struct amdgpu_device *adev);
+ void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
/* GPU virtualization */
@@ -89,7 +90,8 @@ static inline bool is_virtual_machine(void)
struct amdgpu_vm;
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5795f81369f0..bd20ff018512 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -77,8 +77,6 @@ struct amdgpu_pte_update_params {
void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* indicate update pt or its shadow */
- bool shadow;
/* The next two are used during VM update by CPU
* DMA addresses to use for mapping
* Kernel pointer of PD/PT BO that needs to be updated
@@ -161,11 +159,18 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
*/
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
int (*validate)(void *, struct amdgpu_bo *),
- void *param)
+ void *param, bool use_cpu_for_update,
+ struct ttm_bo_global *glob)
{
unsigned i;
int r;
+ if (use_cpu_for_update) {
+ r = amdgpu_bo_kmap(parent->bo, NULL);
+ if (r)
+ return r;
+ }
+
if (!parent->entries)
return 0;
@@ -179,11 +184,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
if (r)
return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&entry->bo->tbo);
+ if (entry->bo->shadow)
+ ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+
/*
* Recurse into the sub directory. This is harmless because we
* have only a maximum of 5 layers.
*/
- r = amdgpu_vm_validate_level(entry, validate, param);
+ r = amdgpu_vm_validate_level(entry, validate, param,
+ use_cpu_for_update, glob);
if (r)
return r;
}
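
Both this validation walk and the amdgpu_vm_get_entry() rework later in this file index the multi-level page-table tree the same way: each level strips block_size bits off the address, most significant level first. A standalone toy of that index arithmetic, with a 3-level, 9-bit configuration assumed:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned block_size = 9;	/* vm_manager.block_size, assumed */
		unsigned level = 3;		/* vm_manager.num_level, assumed */
		unsigned num_entries = 1 << block_size;	/* bo_size / 8, assumed */
		uint64_t addr = 0x12345;	/* address in page units */

		while (level) {
			unsigned shift = block_size * level--;
			unsigned idx = (unsigned)((addr >> shift) % num_entries);

			printf("shift %2u -> entry %u\n", shift, idx);
		}
		return 0;
	}
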
@@ -214,54 +226,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (num_evictions == vm->last_eviction_counter)
return 0;
- return amdgpu_vm_validate_level(&vm->root, validate, param);
+ return amdgpu_vm_validate_level(&vm->root, validate, param,
+ vm->use_cpu_for_update,
+ adev->mman.bdev.glob);
}
/**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
-{
- unsigned i;
-
- if (!parent->entries)
- return;
-
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
-
- if (!entry->bo)
- continue;
-
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- amdgpu_vm_move_level_in_lru(entry);
- }
-}
-
-/**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- amdgpu_vm_move_level_in_lru(&vm->root);
- spin_unlock(&glob->lru_lock);
-}
-
- /**
* amdgpu_vm_alloc_levels - allocate the PD/PT levels
*
* @adev: amdgpu_device pointer
@@ -282,6 +252,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned pt_idx, from, to;
int r;
u64 flags;
+ uint64_t init_value = 0;
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -315,6 +286,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
+ if (vm->pte_support_ats) {
+ init_value = AMDGPU_PTE_SYSTEM;
+ if (level != adev->vm_manager.num_level - 1)
+ init_value |= AMDGPU_PDE_PTE;
+ }
+
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
struct reservation_object *resv = vm->root.bo->tbo.resv;
@@ -327,10 +304,18 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, resv, &pt);
+ NULL, resv, init_value, &pt);
if (r)
return r;
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(pt, NULL);
+ if (r) {
+ amdgpu_bo_unref(&pt);
+ return r;
+ }
+ }
+
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
@@ -424,7 +409,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
struct dma_fence *updates = sync->last_vm_update;
int r = 0;
struct dma_fence *flushed, *tmp;
- bool needs_flush = false;
+ bool needs_flush = vm->use_cpu_for_update;
flushed = id->flushed_updates;
if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
@@ -545,11 +530,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
kfree(fences);
- job->vm_needs_flush = false;
+ job->vm_needs_flush = vm->use_cpu_for_update;
/* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
- bool needs_flush = false;
+ bool needs_flush = vm->use_cpu_for_update;
/* Check all the prerequisites to using this VMID */
if (amdgpu_vm_had_gpu_reset(adev, id))
@@ -745,7 +730,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -767,12 +752,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
vm_flush_needed = true;
}
- if (!vm_flush_needed && !gds_switch_needed)
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
+ if (need_pipe_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
+
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
struct dma_fence *fence;
@@ -874,8 +862,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
{
struct amdgpu_bo_va *bo_va;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+ if (bo_va->base.vm == vm) {
return bo_va;
}
}
@@ -981,6 +969,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
unsigned int i;
uint64_t value;
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
for (i = 0; i < count; i++) {
value = params->pages_addr ?
amdgpu_vm_map_gart(params->pages_addr, addr) :
@@ -989,19 +979,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
i, value, flags);
addr += incr;
}
-
- /* Flush HDP */
- mb();
- amdgpu_gart_flush_gpu_tlb(params->adev, 0);
}
-static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ void *owner)
{
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1042,23 +1029,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
params.adev = adev;
shadow = parent->bo->shadow;
- WARN_ON(vm->use_cpu_for_update && shadow);
- if (vm->use_cpu_for_update && !shadow) {
- r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr);
- if (r)
- return r;
- r = amdgpu_vm_bo_wait(adev, parent->bo);
- if (unlikely(r)) {
- amdgpu_bo_kunmap(parent->bo);
+ if (vm->use_cpu_for_update) {
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
return r;
- }
+
params.func = amdgpu_vm_cpu_set_ptes;
} else {
- if (shadow) {
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
- if (r)
- return r;
- }
ring = container_of(vm->entity.sched, struct amdgpu_ring,
sched);
@@ -1094,21 +1072,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (bo == NULL)
continue;
- if (bo->shadow) {
- struct amdgpu_bo *pt_shadow = bo->shadow;
-
- r = amdgpu_ttm_bind(&pt_shadow->tbo,
- &pt_shadow->tbo.mem);
- if (r)
- return r;
- }
-
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
- if (parent->entries[pt_idx].addr == pt)
+ /* Don't update huge pages here */
+ if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
+ parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
continue;
- parent->entries[pt_idx].addr = pt;
+ parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -1146,28 +1117,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
count, incr, AMDGPU_PTE_VALID);
}
- if (params.func == amdgpu_vm_cpu_set_ptes)
- amdgpu_bo_kunmap(parent->bo);
- else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+ if (!vm->use_cpu_for_update) {
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ } else {
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync,
+ shadow->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ amdgpu_bo_fence(parent->bo, fence, true);
+ dma_fence_put(vm->last_dir_update);
+ vm->last_dir_update = dma_fence_get(fence);
+ dma_fence_put(fence);
+ }
}
/*
* Recurse into the subdirectories. This recursion is harmless because
@@ -1235,33 +1207,112 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
if (r)
amdgpu_vm_invalidate_level(&vm->root);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
+ }
+
return r;
}
/**
- * amdgpu_vm_find_pt - find the page table for an address
+ * amdgpu_vm_get_entry - find the entry for an address
*
* @p: see amdgpu_pte_update_params definition
* @addr: virtual address in question
+ * @entry: resulting entry or NULL
+ * @parent: parent entry
*
- * Find the page table BO for a virtual address, return NULL when none found.
+ * Find the vm_pt entry and its parent for the given address.
*/
-static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
- uint64_t addr)
+void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
+ struct amdgpu_vm_pt **entry,
+ struct amdgpu_vm_pt **parent)
{
- struct amdgpu_vm_pt *entry = &p->vm->root;
unsigned idx, level = p->adev->vm_manager.num_level;
- while (entry->entries) {
+ *parent = NULL;
+ *entry = &p->vm->root;
+ while ((*entry)->entries) {
idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size(entry->bo) / 8;
- entry = &entry->entries[idx];
+ idx %= amdgpu_bo_size((*entry)->bo) / 8;
+ *parent = *entry;
+ *entry = &(*entry)->entries[idx];
}
if (level)
- return NULL;
+ *entry = NULL;
+}
- return entry->bo;
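For reference, a minimal userspace sketch of the index arithmetic the new walk performs; the 9-bit block size, 512-entry tables and three levels are assumptions for illustration, the real values come from adev->vm_manager and amdgpu_bo_size():

	/* Illustrative only: mirrors idx = addr >> (block_size * level--);
	 * idx %= entries from amdgpu_vm_get_entry() with made-up parameters.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12345;	/* page index in the VM space */
		unsigned block_size = 9;	/* assumed bits per level */
		unsigned entries = 512;		/* assumed bo_size / 8 */
		unsigned level = 3;		/* assumed num_level */

		while (level) {
			unsigned idx = (addr >> (block_size * level--)) % entries;
			printf("level %u index %u\n", level + 1, idx);
		}
		return 0;
	}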
+/**
+ * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
+ *
+ * @p: see amdgpu_pte_update_params definition
+ * @entry: vm_pt entry to check
+ * @parent: parent entry
+ * @nptes: number of PTEs updated with this operation
+ * @dst: destination address where the PTEs should point to
+ * @flags: access flags for the PTEs
+ *
+ * Check if we can update the PD with a huge page.
+ */
+static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ struct amdgpu_vm_pt *entry,
+ struct amdgpu_vm_pt *parent,
+ unsigned nptes, uint64_t dst,
+ uint64_t flags)
+{
+ bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
+ uint64_t pd_addr, pde;
+
+ /* In the case of a mixed PT the PDE must point to it */
+ if (p->adev->asic_type < CHIP_VEGA10 ||
+ nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
+ p->src ||
+ !(flags & AMDGPU_PTE_VALID)) {
+
+ dst = amdgpu_bo_gpu_offset(entry->bo);
+ dst = amdgpu_gart_get_vm_pde(p->adev, dst);
+ flags = AMDGPU_PTE_VALID;
+ } else {
+ /* Set the huge page flag to stop scanning at this PDE */
+ flags |= AMDGPU_PDE_PTE;
+ }
+
+ if (entry->addr == (dst | flags))
+ return;
+
+ entry->addr = (dst | flags);
+
+ if (use_cpu_update) {
+ /* In case a huge page is replaced with a system
+ * memory mapping, p->pages_addr != NULL and
+ * amdgpu_vm_cpu_set_ptes would try to translate dst
+ * through amdgpu_vm_map_gart. But dst is already a
+ * GPU address (of the page table). Disable
+ * amdgpu_vm_map_gart temporarily.
+ */
+ dma_addr_t *tmp;
+
+ tmp = p->pages_addr;
+ p->pages_addr = NULL;
+
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+
+ p->pages_addr = tmp;
+ } else {
+ if (parent->bo->shadow) {
+ pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+ }
+ pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+ }
}
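A condensed, compilable sketch of the decision this helper makes; the two bit values match the AMDGPU_PTE_VALID and AMDGPU_PDE_PTE definitions added to amdgpu_vm.h later in this patch, everything else is illustrative:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PTE_VALID (1ULL << 0)	/* AMDGPU_PTE_VALID */
	#define PDE_PTE   (1ULL << 54)	/* AMDGPU_PDE_PTE: PDE acts as a PTE */

	/* Either point the PDE at the page table as usual, or mark it as a
	 * huge-page PTE when the whole PD range maps one contiguous region.
	 */
	static uint64_t make_pde(bool can_be_huge, uint64_t dst,
				 uint64_t pt_addr, uint64_t flags)
	{
		if (!can_be_huge)
			return pt_addr | PTE_VALID;	/* mixed PT: normal PDE */
		return dst | flags | PDE_PTE;		/* stop the walk here */
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)
		       make_pde(true, 0x100000, 0x200000, PTE_VALID));
		return 0;
	}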
/**
@@ -1287,49 +1338,44 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t addr, pe_start;
struct amdgpu_bo *pt;
unsigned nptes;
- int r;
bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
-
/* walk over the address space and update the page tables */
- for (addr = start; addr < end; addr += nptes) {
- pt = amdgpu_vm_get_pt(params, addr);
- if (!pt) {
- pr_err("PT not found, aborting update_ptes\n");
- return -EINVAL;
- }
-
- if (params->shadow) {
- if (WARN_ONCE(use_cpu_update,
- "CPU VM update doesn't suuport shadow pages"))
- return 0;
+ for (addr = start; addr < end; addr += nptes,
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
+ struct amdgpu_vm_pt *entry, *parent;
- if (!pt->shadow)
- return 0;
- pt = pt->shadow;
- }
+ amdgpu_vm_get_entry(params, addr, &entry, &parent);
+ if (!entry)
+ return -ENOENT;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
+ amdgpu_vm_handle_huge_pages(params, entry, parent,
+ nptes, dst, flags);
+ /* We don't need to update PTEs for huge pages */
+ if (entry->addr & AMDGPU_PDE_PTE)
+ continue;
+
+ pt = entry->bo;
if (use_cpu_update) {
- r = amdgpu_bo_kmap(pt, (void *)&pe_start);
- if (r)
- return r;
- } else
+ pe_start = (unsigned long)amdgpu_bo_kptr(pt);
+ } else {
+ if (pt->shadow) {
+ pe_start = amdgpu_bo_gpu_offset(pt->shadow);
+ pe_start += (addr & mask) * 8;
+ params->func(params, pe_start, dst, nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+ }
pe_start = amdgpu_bo_gpu_offset(pt);
+ }
pe_start += (addr & mask) * 8;
-
params->func(params, pe_start, dst, nptes,
AMDGPU_GPU_PAGE_SIZE, flags);
-
- dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
- if (use_cpu_update)
- amdgpu_bo_kunmap(pt);
}
return 0;
@@ -1370,10 +1416,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
-
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
- uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+ unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
+ uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
+ uint64_t frag_align = 1 << pages_per_frag;
uint64_t frag_start = ALIGN(start, frag_align);
uint64_t frag_end = end & ~(frag_align - 1);
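A standalone sketch of the alignment math above, assuming a fragment size of 4 (16 pages): only the aligned middle of the range gets the fragment flags, while the unaligned head and tail are emitted separately:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		uint64_t pages_per_frag = 4;	/* assumed fragment_size */
		uint64_t frag_align = 1ULL << pages_per_frag;
		uint64_t start = 10, end = 100;	/* page numbers of the mapping */
		uint64_t frag_start = ALIGN_UP(start, frag_align);
		uint64_t frag_end = end & ~(frag_align - 1);

		printf("head %llu-%llu, fragmented %llu-%llu, tail %llu-%llu\n",
		       (unsigned long long)start, (unsigned long long)frag_start,
		       (unsigned long long)frag_start, (unsigned long long)frag_end,
		       (unsigned long long)frag_end, (unsigned long long)end);
		return 0;
	}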
@@ -1445,6 +1490,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.vm = vm;
params.src = src;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
if (vm->use_cpu_for_update) {
/* params.src is used as a flag to indicate system memory */
if (pages_addr)
@@ -1453,23 +1502,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* Wait for PT BOs to be free. PTs share the same resv. object
* as the root PD BO
*/
- r = amdgpu_vm_bo_wait(adev, vm->root.bo);
+ r = amdgpu_vm_wait_pd(adev, vm, owner);
if (unlikely(r))
return r;
params.func = amdgpu_vm_cpu_set_ptes;
params.pages_addr = pages_addr;
- params.shadow = false;
return amdgpu_vm_frag_ptes(&params, start, last + 1,
addr, flags);
}
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- /* sync to everything on unmapping */
- if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
-
nptes = last - start + 1;
/*
@@ -1481,6 +1525,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
+ /* one PDE write for each huge page */
+ ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
+
if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
@@ -1542,11 +1589,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- params.shadow = true;
- r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
- if (r)
- goto error_free;
- params.shadow = false;
r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -1565,6 +1607,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
+ amdgpu_vm_invalidate_level(&vm->root);
return r;
}
@@ -1573,7 +1616,6 @@ error_free:
*
* @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1587,7 +1629,6 @@ error_free:
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint64_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1642,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr +
- (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
- else
- max_entries = min(max_entries, 16ull * 1024ull);
+ max_entries = min(max_entries, 16ull * 1024ull);
addr = 0;
} else if (flags & AMDGPU_PTE_VALID) {
addr += adev->vm_manager.vram_base_offset;
@@ -1687,50 +1724,45 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear)
{
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- uint64_t gtt_flags, flags;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
struct dma_fence *exclusive;
+ uint64_t flags;
int r;
- if (clear || !bo_va->bo) {
+ if (clear || !bo_va->base.bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->bo->tbo.mem;
+ mem = &bo_va->base.bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->bo->tbo.ttm, struct
- ttm_dma_tt, ttm);
+ ttm = container_of(bo_va->base.bo->tbo.ttm,
+ struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+ exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo_va->bo) {
- flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
- flags : 0;
- } else {
+ if (bo)
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+ else
flags = 0x0;
- gtt_flags = ~0x0;
- }
spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->vm_status))
+ if (!list_empty(&bo_va->base.vm_status))
list_splice_init(&bo_va->valids, &bo_va->invalids);
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive,
- gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
@@ -1747,11 +1779,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
- list_del_init(&bo_va->vm_status);
+ list_del_init(&bo_va->base.vm_status);
if (clear)
- list_add(&bo_va->vm_status, &vm->cleared);
+ list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
+ }
+
return 0;
}
@@ -1905,15 +1943,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct dma_fence *f = NULL;
int r;
+ uint64_t init_pte_value = 0;
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
+ if (vm->pte_support_ats)
+ init_pte_value = AMDGPU_PTE_SYSTEM;
+
r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
mapping->start, mapping->last,
- 0, 0, &f);
+ init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -1933,26 +1975,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_clear_moved - clear moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Make sure all invalidated BOs are cleared in the PT.
+ * Make sure all moved BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
int r = 0;
spin_lock(&vm->status_lock);
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated,
- struct amdgpu_bo_va, vm_status);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved,
+ struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -1992,16 +2034,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->vm = vm;
- bo_va->bo = bo;
+ bo_va->base.vm = vm;
+ bo_va->base.bo = bo;
+ INIT_LIST_HEAD(&bo_va->base.bo_list);
+ INIT_LIST_HEAD(&bo_va->base.vm_status);
+
bo_va->ref_count = 1;
- INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- INIT_LIST_HEAD(&bo_va->vm_status);
if (bo)
- list_add_tail(&bo_va->bo_list, &bo->va);
+ list_add_tail(&bo_va->base.bo_list, &bo->va);
return bo_va;
}
@@ -2026,7 +2069,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
/* validate the parameters */
@@ -2037,7 +2081,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2047,7 +2091,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -2092,7 +2136,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2104,7 +2149,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
/* Allocate all the needed memory */
@@ -2112,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+ r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
if (r) {
kfree(mapping);
return r;
@@ -2152,7 +2197,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t saddr)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2300,12 +2345,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
- list_del(&bo_va->bo_list);
+ list_del(&bo_va->base.bo_list);
spin_lock(&vm->status_lock);
- list_del(&bo_va->vm_status);
+ list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2337,13 +2382,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_vm_bo_base *bo_base;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
- list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status,
+ &bo_base->vm->moved);
+ spin_unlock(&bo_base->vm->status_lock);
}
}
@@ -2361,12 +2407,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
}
/**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
+ *
+ * @adev: amdgpu_device pointer
+ * @fragment_size_default: the default fragment size to use if the module parameter is set to auto (-1)
+ */
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+{
+ if (amdgpu_vm_fragment_size == -1)
+ adev->vm_manager.fragment_size = fragment_size_default;
+ else
+ adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2381,8 +2441,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
else
adev->vm_manager.block_size = amdgpu_vm_block_size;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
+ amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
}
/**
@@ -2404,13 +2467,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amd_sched_rq *rq;
int r, i;
u64 flags;
+ uint64_t init_pde_value = 0;
- vm->va = RB_ROOT;
+ vm->va = RB_ROOT_CACHED;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
- INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
@@ -2425,10 +2489,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+ vm->pte_support_ats = false;
+
+ if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- else
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ vm->pte_support_ats = true;
+ init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ }
+ } else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
@@ -2448,7 +2519,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.bo);
if (r)
goto error_free_sched_entity;
@@ -2457,6 +2528,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error_free_root;
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(vm->root.bo, NULL);
+ if (r)
+ goto error_free_root;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
return 0;
@@ -2512,10 +2590,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
- if (!RB_EMPTY_ROOT(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
+ rbtree_postorder_for_each_entry_safe(mapping, tmp,
+ &vm->va.rb_root, rb) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 936f158bc5ec..6716355403ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -50,9 +50,6 @@ struct amdgpu_bo_list_entry;
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
@@ -68,6 +65,9 @@ struct amdgpu_bo_list_entry;
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)
+/* PDE is handled as PTE for VEGA10 */
+#define AMDGPU_PDE_PTE (1ULL << 54)
+
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
@@ -94,6 +94,18 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+ /* constant after initialization */
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
+
+ /* protected by bo being reserved */
+ struct list_head bo_list;
+
+ /* protected by spinlock */
+ struct list_head vm_status;
+};
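The point of this new base struct is that amdgpu_bo_va embeds it as its first member, so generic code can walk bo->va as amdgpu_vm_bo_base entries while bo_va-specific code recovers the outer struct. A possible kernel-style helper, illustrative only and not part of this patch (it relies on container_of() and the structs in this header):

	static inline struct amdgpu_bo_va *
	to_amdgpu_bo_va(struct amdgpu_vm_bo_base *base)
	{
		return container_of(base, struct amdgpu_bo_va, base);
	}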
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
@@ -106,13 +118,13 @@ struct amdgpu_vm_pt {
struct amdgpu_vm {
/* tree of virtual addresses mapped */
- struct rb_root va;
+ struct rb_root_cached va;
/* protecting invalidated */
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
+ struct list_head moved;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
@@ -135,11 +147,12 @@ struct amdgpu_vm {
u64 client_id;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
- /* each VM will map on CSA */
- struct amdgpu_bo_va *csa_bo_va;
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
+
+ /* Flag to indicate ATS support from PTE for GFX9 */
+ bool pte_support_ats;
};
struct amdgpu_vm_id {
@@ -182,6 +195,7 @@ struct amdgpu_vm_manager {
uint32_t num_level;
uint64_t vm_size;
uint32_t block_size;
+ uint32_t fragment_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
@@ -214,15 +228,13 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid);
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
@@ -231,8 +243,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
@@ -259,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a2c59a08b2bd..26e900627971 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,6 +28,8 @@
struct amdgpu_vram_mgr {
struct drm_mm mm;
spinlock_t lock;
+ atomic64_t usage;
+ atomic64_t vis_usage;
};
/**
@@ -79,6 +81,27 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
+ * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ *
+ * @adev: amdgpu device structure
+ * @node: MM node structure
+ *
+ * Calculate how many bytes of the MM node are inside visible VRAM
+ */
+static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ struct drm_mm_node *node)
+{
+ uint64_t start = node->start << PAGE_SHIFT;
+ uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+
+ if (start >= adev->mc.visible_vram_size)
+ return 0;
+
+ return (end > adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size : end) - start;
+}
+
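The clamp above in isolation, with made-up numbers: a node spanning 192-320 MiB measured against a 256 MiB visible window contributes 64 MiB:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t vis_size(uint64_t start, uint64_t end, uint64_t vis_limit)
	{
		if (start >= vis_limit)
			return 0;
		return (end > vis_limit ? vis_limit : end) - start;
	}

	int main(void)
	{
		/* node at 192..320 MiB, 256 MiB visible window */
		uint64_t bytes = vis_size(192ULL << 20, 320ULL << 20,
					  256ULL << 20);

		printf("%llu MiB visible\n", (unsigned long long)(bytes >> 20));
		return 0;
	}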
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -93,11 +116,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ uint64_t usage = 0, vis_usage = 0;
unsigned i;
int r;
@@ -142,6 +167,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
+ usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+
/* Calculate a virtual BO start address to easily check if
* everything is CPU accessible.
*/
@@ -155,6 +183,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
}
spin_unlock(&mgr->lock);
+ atomic64_add(usage, &mgr->usage);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+
mem->mm_node = nodes;
return 0;
@@ -181,8 +212,10 @@ error:
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages;
if (!mem->mm_node)
@@ -192,31 +225,67 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
while (pages) {
pages -= nodes->size;
drm_mm_remove_node(nodes);
+ usage += nodes->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
++nodes;
}
spin_unlock(&mgr->lock);
+ atomic64_sub(usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
kfree(mem->mm_node);
mem->mm_node = NULL;
}
/**
+ * amdgpu_vram_mgr_usage - how many bytes are used in this domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in this domain.
+ */
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->usage);
+}
+
+/**
+ * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in the visible part of VRAM
+ */
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->vis_usage);
+}
+
+/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using the given DRM printer.
*/
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
+ man->size, amdgpu_vram_mgr_usage(man) >> 20,
+ amdgpu_vram_mgr_vis_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 37a499ab30eb..567c4a5cf90c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_amdkfd_suspend(adev);
-
return cik_common_hw_fini(adev);
}
static int cik_common_resume(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = cik_common_hw_init(adev);
- if (r)
- return r;
-
- return amdgpu_amdkfd_resume(adev);
+ return cik_common_hw_init(adev);
}
static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c216e16826c9..f508f4d01e4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -342,6 +342,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * cik_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (CIK).
+ */
+static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+ if (enable) {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 1);
+ if (amdgpu_sdma_phase_quantum) {
+ WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ }
+ } else {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 0);
+ }
+
+ WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+ }
+}
+
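The quantum encoding loop above, extracted into a standalone sketch: halve the requested value (rounding up) until it fits the register's VALUE field, counting halvings in UNIT. The 13-bit field width here is an assumption, the real width comes from SDMA0_PHASE0_QUANTUM__VALUE_MASK:

	#include <stdio.h>

	int main(void)
	{
		unsigned value = 300000;	/* amdgpu_sdma_phase_quantum */
		unsigned value_max = (1u << 13) - 1;	/* assumed VALUE field */
		unsigned unit = 0;

		while (value > value_max) {
			value = (value + 1) >> 1;
			unit++;
		}
		printf("value=%u unit=%u (~%u clock cycles)\n",
		       value, unit, value << unit);
		return 0;
	}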
+/**
* cik_sdma_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
@@ -537,6 +594,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
/* halt the engine before programming */
cik_sdma_enable(adev, false);
+ /* enable sdma ring preemption */
+ cik_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@@ -984,6 +1043,7 @@ static int cik_sdma_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9f78c03a2e31..4e519dc42916 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp, frame_count;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1867,7 +1739,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2267,6 +2139,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2304,11 +2177,14 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
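What the new store does, in isolation: it keeps the top 10 bits of each 16-bit gamma entry and packs the three channels into one 30-bit LUT word, replacing the old pre-truncated lut_r/g/b arrays. A standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack as R[29:20] G[19:10] B[9:0]; (r & 0xffc0) << 14 is the
	 * same as (r >> 6) << 20, and likewise for the green channel.
	 */
	static uint32_t pack_lut(uint16_t r, uint16_t g, uint16_t b)
	{
		return ((uint32_t)(r & 0xffc0) << 14) |
		       ((uint32_t)(g & 0xffc0) << 4) |
		       (b >> 6);
	}

	int main(void)
	{
		printf("0x%08x\n", pack_lut(0xffff, 0x8000, 0x0040));
		return 0;
	}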
tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2555,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2563,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2597,7 +2473,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2624,15 +2500,6 @@ static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v10_0_crtc_load_lut(crtc);
return 0;
@@ -2844,14 +2711,12 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
- .load_lut = dce_v10_0_crtc_load_lut,
.disable = dce_v10_0_crtc_disable,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2869,12 +2734,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
switch (amdgpu_crtc->crtc_id) {
case 0:
default:
@@ -3025,6 +2884,8 @@ static int dce_v10_0_hw_init(void *handle)
dce_v10_0_init_golden_registers(adev);
+ /* disable vga render */
+ dce_v10_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3737,7 +3598,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
- .set_vga_render_state = &dce_v10_0_set_vga_render_state,
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
.vblank_wait = &dce_v10_0_vblank_wait,
@@ -3750,8 +3610,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
.add_encoder = &dce_v10_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v10_0_stop_mc_access,
- .resume_mc_access = &dce_v10_0_resume_mc_access,
};
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 4bcf01dc567a..11edc75edaa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1851,7 +1778,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2251,6 +2178,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2282,11 +2210,14 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2575,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2583,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2617,7 +2548,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2644,15 +2575,6 @@ static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v11_0_crtc_load_lut(crtc);
return 0;
@@ -2892,14 +2814,12 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
- .load_lut = dce_v11_0_crtc_load_lut,
.disable = dce_v11_0_crtc_disable,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2917,12 +2837,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
switch (amdgpu_crtc->crtc_id) {
case 0:
default:
@@ -3086,6 +3000,8 @@ static int dce_v11_0_hw_init(void *handle)
dce_v11_0_init_golden_registers(adev);
+ /* disable vga render */
+ dce_v11_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
@@ -3806,7 +3722,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .set_vga_render_state = &dce_v11_0_set_vga_render_state,
.bandwidth_update = &dce_v11_0_bandwidth_update,
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
.vblank_wait = &dce_v11_0_vblank_wait,
@@ -3819,8 +3734,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
.add_encoder = &dce_v11_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v11_0_stop_mc_access,
- .resume_mc_access = &dce_v11_0_resume_mc_access,
};
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fd134a4629d7..a51e35f824a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -42,6 +42,7 @@
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
+#include "dce_v6_0.h"
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -392,117 +393,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
-static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp, frame_count;
- int i, j;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- WREG32(mmVGA_RENDER_CONTROL, 0);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- if (crtc_enabled) {
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-
- if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
- dce_v6_0_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = evergreen_get_vblank_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (evergreen_get_vblank_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
-
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
-
- /* unlock regs and wait for update */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 0) {
- tmp &= ~0x7;
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
- tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (tmp & 1) {
- tmp &= ~1;
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
- break;
- udelay(1);
- }
- }
- }
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-
-}
-
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1597,7 +1487,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
ssize_t err;
u32 tmp;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2182,6 +2072,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
@@ -2211,11 +2102,14 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
@@ -2428,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2436,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2470,7 +2364,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2496,15 +2390,6 @@ static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v6_0_crtc_load_lut(crtc);
return 0;
@@ -2712,14 +2597,12 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
- .load_lut = dce_v6_0_crtc_load_lut,
.disable = dce_v6_0_crtc_disable,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2737,12 +2620,6 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -2873,6 +2750,8 @@ static int dce_v6_0_hw_init(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* disable vga render */
+ dce_v6_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3525,7 +3404,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
- .set_vga_render_state = &dce_v6_0_set_vga_render_state,
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
.vblank_wait = &dce_v6_0_vblank_wait,
@@ -3538,8 +3416,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
.add_encoder = &dce_v6_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v6_0_stop_mc_access,
- .resume_mc_access = &dce_v6_0_resume_mc_access,
};
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a9e869554627..9cf14b8b2db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1750,7 +1675,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2124,6 +2049,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
@@ -2153,11 +2079,14 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
@@ -2406,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2414,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2448,7 +2377,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2475,15 +2404,6 @@ static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v8_0_crtc_load_lut(crtc);
return 0;
@@ -2702,14 +2622,12 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
- .load_lut = dce_v8_0_crtc_load_lut,
.disable = dce_v8_0_crtc_disable,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2727,12 +2645,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -2870,6 +2782,8 @@ static int dce_v8_0_hw_init(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* disable vga render */
+ dce_v8_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3574,7 +3488,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
- .set_vga_render_state = &dce_v8_0_set_vga_render_state,
.bandwidth_update = &dce_v8_0_bandwidth_update,
.vblank_get_counter = &dce_v8_0_vblank_get_counter,
.vblank_wait = &dce_v8_0_vblank_wait,
@@ -3587,8 +3500,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
.add_encoder = &dce_v8_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v8_0_stop_mc_access,
- .resume_mc_access = &dce_v8_0_resume_mc_access,
};
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 90bb08309a53..b9ee9073cb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -95,62 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
return 0;
}
-static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- dce_v6_0_disable_dce(adev);
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KAVERI:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dce_v8_0_disable_dce(adev);
- break;
-#endif
- case CHIP_FIJI:
- case CHIP_TONGA:
- dce_v10_0_disable_dce(adev);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- dce_v11_0_disable_dce(adev);
- break;
- case CHIP_TOPAZ:
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_HAINAN:
-#endif
- /* no DCE */
- return;
- default:
- DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
- }
-
- return;
-}
-static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- return;
-}
-
-static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- return;
-}
-
/**
* dce_virtual_bandwidth_update - program display watermarks
*
@@ -168,16 +112,6 @@ static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
-
return 0;
}
@@ -289,11 +223,6 @@ static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
-{
- return;
-}
-
static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
@@ -309,14 +238,12 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
- .load_lut = dce_virtual_crtc_load_lut,
.disable = dce_virtual_crtc_disable,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -329,12 +256,6 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->crtc_id = index;
adev->mode_info.crtcs[index] = amdgpu_crtc;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
@@ -522,6 +443,47 @@ static int dce_virtual_sw_fini(void *handle)
static int dce_virtual_hw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dce_v8_0_disable_dce(adev);
+ break;
+#endif
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ /* no DCE */
+ break;
+ case CHIP_VEGA10:
+ break;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ }
return 0;
}
@@ -677,7 +639,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
- .set_vga_render_state = &dce_virtual_set_vga_render_state,
.bandwidth_update = &dce_virtual_bandwidth_update,
.vblank_get_counter = &dce_virtual_vblank_get_counter,
.vblank_wait = &dce_virtual_vblank_wait,
@@ -690,8 +651,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
.add_encoder = NULL,
.add_connector = NULL,
- .stop_mc_access = &dce_virtual_stop_mc_access,
- .resume_mc_access = &dce_virtual_resume_mc_access,
};
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
@@ -809,7 +768,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
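On the num_types change: the amdgpu_crtc_irq enum lists the six per-CRTC vblank sources first, so AMDGPU_CRTC_IRQ_VBLANK6 + 1 counts exactly those and excludes the later entries that dce_virtual never raises, where AMDGPU_CRTC_IRQ_LAST overstated the table. Hedged sketch of the assumed enum layout:

/* assumed layout, see amdgpu_mode.h */
enum amdgpu_crtc_irq {
	AMDGPU_CRTC_IRQ_VBLANK1 = 0,
	/* ... VBLANK2 .. VBLANK5 ... */
	AMDGPU_CRTC_IRQ_VBLANK6,    /* + 1 == six vblank types */
	AMDGPU_CRTC_IRQ_VLINE1,     /* not generated by dce_virtual */
	/* ... */
	AMDGPU_CRTC_IRQ_LAST
};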
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 5173ca1fd159..d228f5a99044 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1573,7 +1573,7 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
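The bump from 7 to 8 scratch registers shows up directly in the derived allocator mask. Worked values:

/* num_reg = 7: free_mask = (1u << 7) - 1 = 0x7f  (SCRATCH_REG7 unusable)
 * num_reg = 8: free_mask = (1u << 8) - 1 = 0xff  (SCRATCH_REG0..7)
 */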
@@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
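Each open-coded teardown block in this patch collapses into amdgpu_bo_free_kernel(), which reserves, unpins, unrefs and clears the pointers in one place; passing NULL for gpu_addr/cpu_addr skips the corresponding cleanup. Sketch of the helper, simplified from amdgpu_object.c (treat the details as my paraphrase):

void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);
		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;
	if (cpu_addr)
		*cpu_addr = NULL;
}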
@@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+ r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+
amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
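amdgpu_bo_create_reserved() wraps the four-call sequence the old code spelled out (create, reserve, pin, kmap) and on success hands back the BO still reserved and CPU-mapped so the caller can fill it. Hedged sketch of the resulting idiom:

struct amdgpu_bo *bo = NULL;
u64 gpu_addr;
void *cpu_ptr;
int r;

r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
			      AMDGPU_GEM_DOMAIN_VRAM,
			      &bo, &gpu_addr, &cpu_ptr);
if (r)
	return r;               /* helper unwinds its own partial state */

/* ... write the buffer contents through cpu_ptr ... */

amdgpu_bo_kunmap(bo);           /* caller drops the mapping ... */
amdgpu_bo_unreserve(bo);        /* ... and the reservation */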
@@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 37b45e4403d1..00868764a0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_init_compute_vmid - gart enable
+ * gfx_v7_0_init_compute_vmid - gart enable
*
* @adev: amdgpu_device pointer
*
@@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
#define DEFAULT_SH_MEM_BASES (0x6000)
#define FIRST_COMPUTE_VMID (8)
#define LAST_COMPUTE_VMID (16)
-static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
@@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
@@ -1934,12 +1935,11 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_base);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- gmc_v7_0_init_compute_vmid(adev);
+ gfx_v7_0_init_compute_vmid(adev);
WREG32(mmSX_DEBUG_1, 0x20);
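Hoisting the mmSH_STATIC_MEM_CONFIG write out of the loop reflects that this register is, as the change implies, global rather than SRBM-banked per VMID, so a single write before the srbm_select loop suffices. Resulting flow, sketched:

WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);   /* once, global */

mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
	cik_srbm_select(adev, 0, 0, 0, i);
	/* only genuinely per-VMID SH_MEM_* writes remain here */
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);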
@@ -2021,7 +2021,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
*/
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
*/
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (ring->mqd_obj) {
- r = amdgpu_bo_reserve(ring->mqd_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
-
- amdgpu_bo_unpin(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
-
- amdgpu_bo_unref(&ring->mqd_obj);
- ring->mqd_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
}
}
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
@@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
/* allocate space for ALL pipes (even the ones we don't own) */
mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
* GFX7_MEC_HPD_SIZE * 2;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
gfx_v7_0_mec_fini(adev);
return r;
}
@@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
struct cik_mqd *mqd;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
- if (ring->mqd_obj == NULL) {
- r = amdgpu_bo_create(adev,
- sizeof(struct cik_mqd),
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &ring->mqd_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto out;
-
- r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
- &mqd_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
- goto out_unreserve;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+ r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ &mqd_gpu_addr, (void **)&mqd);
if (r) {
- dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
- goto out_unreserve;
+ dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+ return r;
}
mutex_lock(&adev->srbm_mutex);
@@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
mutex_unlock(&adev->srbm_mutex);
amdgpu_bo_kunmap(ring->mqd_obj);
-out_unreserve:
amdgpu_bo_unreserve(ring->mqd_obj);
-out:
return 0;
}
@@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
*/
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* save restore block */
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
@@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
@@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v7_0_get_csb_buffer(adev, dst_ptr);
@@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.cp_table_size) {
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index aa5a50f5eac8..fc260c13b1da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 tonga_mgcg_cgcg_init[] =
@@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 golden_settings_polaris10_a11[] =
@@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 fiji_golden_common_all[] =
@@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
@@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 iceland_mgcg_cgcg_init[] =
@@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 cz_mgcg_cgcg_init[] =
@@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 stoney_mgcg_cgcg_init[] =
@@ -750,7 +750,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* jump table block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v8_0_get_csb_buffer(adev, dst_ptr);
@@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
return r;
}
@@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
@@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
return r;
}
@@ -3802,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
+
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
@@ -3825,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -4564,7 +4470,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
- if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+ if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
DRM_ERROR("Invalid KCQ enabled: %d\n", i);
break;
}
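The comparison fix closes an off-by-one: queue_mask is 64 bits wide, so the largest legal bit index is 63. Worked check:

/* sizeof(queue_mask) * 8 == 64
 * old: WARN_ON(i > 64)   -> i == 64 passes, then 1ull << 64 is undefined
 * new: WARN_ON(i >= 64)  -> i == 64 (and above) is rejected
 */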
@@ -4673,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_misc_reserved = 0x00000003;
if (!(adev->flags & AMD_IS_APU)) {
mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
}
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
@@ -4862,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
- ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
- ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
@@ -4886,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
- ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
- ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c9b9c88231aa..69182eeca264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] =
SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
+ SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
+ SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
@@ -211,7 +213,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (cs_data) {
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create rlc csb bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+ r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
@@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) failed to create cp table bo\n", r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
rv_init_cp_jump_table(adev);
@@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
- if (adev->gfx.mec.mec_fw_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
- adev->gfx.mec.mec_fw_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
@@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
@@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
- if (adev->gfx.mec.mec_fw_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hdr->header.ucode_size_bytes,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.mec_fw_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.mec_fw_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
+ r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.mec_fw_obj,
+ &adev->gfx.mec.mec_fw_gpu_addr,
+ (void **)&fw);
if (r) {
- dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
+
memcpy(fw, fw_data, fw_size);
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
return 0;
}
@@ -2219,7 +2157,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
- int r, i;
+ int r, i, tmp;
/* init the CP */
WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
@@ -2227,7 +2165,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
- r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
+ r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
@@ -2265,6 +2203,12 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_commit(ring);
return 0;
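The ring allocation grows by exactly the size of the new packet: PACKET3_SET_UCONFIG_REG with a count of 1 emits a header dword, a register-offset dword and one data dword. Arithmetic:

/* old: gfx_v9_0_get_csb_size(adev) + 4
 * new: gfx_v9_0_get_csb_size(adev) + 4 + 3
 *   3 = header + VGT_INDEX_TYPE offset + value for the added packet
 */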
@@ -2427,7 +2371,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
- if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+ if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
DRM_ERROR("Invalid KCQ enabled: %d\n", i);
break;
}
@@ -4158,7 +4102,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
.late_init = gfx_v9_0_late_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index 56ef652a575d..fa5a3fbaf6ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -24,7 +24,6 @@
#ifndef __GFX_V9_0_H__
#define __GFX_V9_0_H__
-extern const struct amd_ip_funcs gfx_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index a42f483767e7..6c8040e616c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gtt_start >> 12));
+ (u32)(adev->mc.gart_start >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gtt_start >> 44));
+ (u32)(adev->mc.gart_start >> 44));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gtt_end >> 12));
+ (u32)(adev->mc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gtt_end >> 44));
+ (u32)(adev->mc.gart_end >> 44));
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
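Besides the mc.gtt_* to mc.gart_* field rename, the hunk above shows how the 48-bit GART aperture bounds split across paired 32-bit registers. Encoding sketch:

/* LO32 = (u32)(addr >> 12)   -> address bits 43:12 (4 KiB granularity)
 * HI32 = (u32)(addr >> 44)   -> address bits 47:44
 */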
@@ -129,7 +129,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
@@ -144,6 +144,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -206,6 +208,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
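With RETRY_PERMISSION_OR_INVALID_PAGE_FAULT cleared, the hub answers a bad translation with a no-retry XNACK, so one wild access produces a single VM-fault interrupt rather than an interrupt storm from endless replays. Behaviour contrast, as I understand it:

/* retry XNACK:    faulting access is replayed, same fault refires
 * no-retry XNACK: access is dropped, one fault, one log entry
 */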
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb085f480..206e29cad753 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
-extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d0214d942bfc..5be9c83dfcf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
-static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v6_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
}
-static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
-
}
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
@@ -228,20 +219,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
- u32 tmp;
int i, j;
/* Initialize HDP */
@@ -254,16 +245,23 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v6_0_mc_stop(adev, &save);
-
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
+ if (adev->mode_info.num_crtc) {
+ u32 tmp;
+
+ /* Lockout access through VGA aperture */
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp &= ~VGA_VSTATUS_CNTL;
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -271,13 +269,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -285,7 +276,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v6_0_mc_resume(adev, &save);
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -342,14 +332,23 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.visible_vram_size = adev->mc.aper_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_HAINAN: /* no MM engines */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_VERDE: /* UVD, VCE do not support GPUVM */
+ case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */
+ case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
+ case CHIP_OLAND: /* UVD, VCE do not support GPUVM */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
gmc_v6_0_vram_gtt_location(adev, &adev->mc);
@@ -479,6 +478,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
+ u32 field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -506,13 +506,15 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL2,
VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+
+ field = adev->vm_manager.fragment_size;
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
- (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
- (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
+ (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+ (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -559,7 +561,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -829,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@@ -987,7 +989,6 @@ static int gmc_v6_0_wait_for_idle(void *handle)
static int gmc_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1003,7 +1004,7 @@ static int gmc_v6_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v6_0_mc_stop(adev, &save);
+ gmc_v6_0_mc_stop(adev);
if (gmc_v6_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1023,7 +1024,7 @@ static int gmc_v6_0_soft_reset(void *handle)
udelay(50);
- gmc_v6_0_mc_resume(adev, &save);
+ gmc_v6_0_mc_resume(adev);
udelay(50);
}
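The new per-ASIC GART sizing above replaces the old "max(1024 MB, VRAM)" default: chips whose multimedia engines lack GPUVM support keep a 1 GB GART so UVD/VCE buffers still fit, everything else drops to 256 MB. A compilable sketch of that selection logic (the enum values and the -1 "auto" convention mirror the hunk; the function name is illustrative):

#include <stdint.h>

enum asic_type { CHIP_HAINAN, CHIP_VERDE, CHIP_TAHITI, CHIP_PITCAIRN, CHIP_OLAND };

static uint64_t default_gart_size(enum asic_type type, long user_mb)
{
	if (user_mb != -1)		/* module parameter override */
		return (uint64_t)user_mb << 20;

	switch (type) {
	case CHIP_VERDE:		/* UVD, VCE do not support GPUVM */
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
		return 1024ULL << 20;
	case CHIP_HAINAN:		/* no MM engines */
	default:
		return 256ULL << 20;
	}
}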
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7e9ea53edf8b..eace9e7182c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -37,6 +37,9 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
#include "amdgpu_atombios.h"
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
@@ -76,14 +79,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -99,8 +98,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -112,9 +110,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
}
/**
@@ -242,15 +237,17 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
/**
@@ -263,7 +260,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
u32 tmp;
int i, j;
@@ -277,13 +273,20 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v7_0_mc_stop(adev, &save);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
+ if (adev->mode_info.num_crtc) {
+ /* Lockout access through VGA aperture */
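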
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -291,20 +294,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v7_0_mc_resume(adev, &save);
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -391,14 +386,26 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ: /* no MM engines */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
+ case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */
+ case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
+ case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
+ case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+#endif
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
gmc_v7_0_vram_gtt_location(adev, &adev->mc);
@@ -575,7 +582,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -605,14 +612,16 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -666,7 +675,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -961,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1138,7 +1147,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1154,7 +1162,7 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v7_0_mc_stop(adev, &save);
+ gmc_v7_0_mc_stop(adev);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1175,7 +1183,7 @@ static int gmc_v7_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- gmc_v7_0_mc_resume(adev, &save);
+ gmc_v7_0_mc_resume(adev);
udelay(50);
}
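The relocation change here and in gmc_v6 reads the framebuffer base the VBIOS already programmed instead of assuming 0. MC_VM_FB_LOCATION packs the window at 16 MB granularity: bits 15:0 hold base >> 24 and bits 31:16 hold top >> 24, which is why the removed write-side code shifted vram_start/vram_end by 24. A sketch of both directions of that encoding:

#include <stdint.h>

/* Decode the base address out of MC_VM_FB_LOCATION (16 MB units). */
static uint64_t fb_base_from_reg(uint32_t fb_location)
{
	return (uint64_t)(fb_location & 0xFFFF) << 24;
}

/* Pack a VRAM range back into the register layout, as the removed
 * write-side code did. */
static uint32_t fb_reg_from_range(uint64_t start, uint64_t end)
{
	uint32_t tmp = (uint32_t)((end >> 24) & 0xFFFF) << 16;

	tmp |= (uint32_t)((start >> 24) & 0xFFFF);
	return tmp;
}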
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index cc9f88057cd5..3b3326daf32b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -35,6 +35,9 @@
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#include "vid.h"
#include "vi.h"
@@ -161,14 +164,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -184,8 +183,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -197,9 +195,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
}
/**
@@ -404,15 +399,20 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = 0;
+
+ if (!amdgpu_sriov_vf(adev))
+ base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
/**
@@ -425,7 +425,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
u32 tmp;
int i, j;
@@ -439,13 +438,20 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v8_0_mc_stop(adev, &save);
if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
+ if (adev->mode_info.num_crtc) {
+ /* Lockout access through VGA aperture */
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -453,20 +459,23 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+
+ if (amdgpu_sriov_vf(adev)) {
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ }
+
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v8_0_mc_resume(adev, &save);
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -553,14 +562,25 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS11: /* all engines support GPUVM */
+ case CHIP_POLARIS10: /* all engines support GPUVM */
+ case CHIP_POLARIS12: /* all engines support GPUVM */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
+ case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
+ case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
+ case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
gmc_v8_0_vram_gtt_location(adev, &adev->mc);
@@ -761,7 +781,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -792,10 +812,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* XXX: set to enable PTE/PDE in system memory */
tmp = RREG32(mmVM_L2_CNTL4);
@@ -813,8 +835,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -869,7 +891,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -1045,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1260,7 +1282,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
- gmc_v8_0_mc_stop(adev, &adev->mc.save);
+ gmc_v8_0_mc_stop(adev);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1306,7 +1328,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
- gmc_v8_0_mc_resume(adev, &adev->mc.save);
+ gmc_v8_0_mc_resume(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 175ba5f9691c..d04d0b123212 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -23,11 +23,14 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
+#include "amdgpu_atomfirmware.h"
#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "soc15_common.h"
@@ -419,8 +422,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
amdgpu_vram_location(adev, &adev->mc, base);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -442,43 +444,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
u32 tmp;
int chansize, numchan;
- /* hbm memory channel size */
- chansize = 128;
-
- tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
- tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
- tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- switch (tmp) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 0;
- break;
- case 3:
- numchan = 4;
- break;
- case 4:
- numchan = 0;
- break;
- case 5:
- numchan = 8;
- break;
- case 6:
- numchan = 0;
- break;
- case 7:
- numchan = 16;
- break;
- case 8:
- numchan = 2;
- break;
+ adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ /* hbm memory channel size */
+ chansize = 128;
+
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
+ tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+ tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ switch (tmp) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 0;
+ break;
+ case 3:
+ numchan = 4;
+ break;
+ case 4:
+ numchan = 0;
+ break;
+ case 5:
+ numchan = 8;
+ break;
+ case 6:
+ numchan = 0;
+ break;
+ case 7:
+ numchan = 16;
+ break;
+ case 8:
+ numchan = 2;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
@@ -494,14 +499,20 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10: /* all engines support GPUVM */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_RAVEN: /* DCE SG support */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
gmc_v9_0_vram_gtt_location(adev, &adev->mc);
@@ -537,10 +548,21 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->mc.invalidate_lock);
- if (adev->flags & AMD_IS_APU) {
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- amdgpu_vm_adjust_size(adev, 64);
- } else {
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
+ adev->vm_manager.vm_size = 1U << 18;
+ adev->vm_manager.block_size = 9;
+ adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
+ } else {
+ /* vm_size is 64GB for legacy 2-level page support */
+ amdgpu_vm_adjust_size(adev, 64, 9);
+ adev->vm_manager.num_level = 1;
+ }
+ break;
+ case CHIP_VEGA10:
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
/*
@@ -550,11 +572,18 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size,
- adev->vm_manager.block_size);
+ adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
+ break;
+ default:
+ break;
}
+ DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n",
+ adev->vm_manager.vm_size,
+ adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
+
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
&adev->mc.vm_fault);
@@ -619,11 +648,6 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
- /* TODO: fix num_level for APU when updating vm size and block size */
- if (adev->flags & AMD_IS_APU)
- adev->vm_manager.num_level = 1;
- else
- adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
return 0;
@@ -731,7 +755,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -745,6 +769,20 @@ static int gmc_v9_0_hw_init(void *handle)
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
+ if (adev->mode_info.num_crtc) {
+ u32 tmp;
+
+ /* Lockout access through VGA aperture */
+ tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+ }
+
r = gmc_v9_0_gart_enable(adev);
return r;
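The vega10 path now asks atomfirmware for the VRAM width first and only falls back to decoding the DF interleave field. That decode is a small lookup (the zero entries mark encodings the code treats as invalid) multiplied by the 128-bit HBM channel size. A sketch of the fallback, with the lookup table copied from the hunk:

#include <stdint.h>

static unsigned int hbm_num_channels(uint32_t intlv_field)
{
	static const unsigned int numchan[] = {
		1, 2, 0, 4, 0, 8, 0, 16, 2,
	};

	if (intlv_field >= sizeof(numchan) / sizeof(numchan[0]))
		return 1;	/* unknown encoding: assume one channel */
	return numchan[intlv_field];
}

static unsigned int hbm_vram_width(uint32_t intlv_field)
{
	return hbm_num_channels(intlv_field) * 128;	/* bits */
}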
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 9804318f3488..74cb647da30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
mmhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gtt_start >> 12));
+ (u32)(adev->mc.gart_start >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gtt_start >> 44));
+ (u32)(adev->mc.gart_start >> 44));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gtt_end >> 12));
+ (u32)(adev->mc.gart_end >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gtt_end >> 44));
+ (u32)(adev->mc.gart_end >> 44));
}
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -143,7 +143,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
@@ -158,6 +158,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -222,6 +224,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
@@ -245,28 +250,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
}
struct pctl_data {
- uint32_t index;
- uint32_t data;
+ uint32_t index;
+ uint32_t data;
};
-const struct pctl_data pctl0_data[] = {
- {0x0, 0x7a640},
- {0x9, 0x2a64a},
- {0xd, 0x2a680},
- {0x11, 0x6a684},
- {0x19, 0xea68e},
- {0x29, 0xa69e},
- {0x2b, 0x34a6c0},
- {0x61, 0x83a707},
- {0xe6, 0x8a7a4},
- {0xf0, 0x1a7b8},
- {0xf3, 0xfa7cc},
- {0x104, 0x17a7dd},
- {0x11d, 0xa7dc},
- {0x11f, 0x12a7f5},
- {0x133, 0xa808},
- {0x135, 0x12a810},
- {0x149, 0x7a82c}
+static const struct pctl_data pctl0_data[] = {
+ {0x0, 0x7a640},
+ {0x9, 0x2a64a},
+ {0xd, 0x2a680},
+ {0x11, 0x6a684},
+ {0x19, 0xea68e},
+ {0x29, 0xa69e},
+ {0x2b, 0x34a6c0},
+ {0x61, 0x83a707},
+ {0xe6, 0x8a7a4},
+ {0xf0, 0x1a7b8},
+ {0xf3, 0xfa7cc},
+ {0x104, 0x17a7dd},
+ {0x11d, 0xa7dc},
+ {0x11f, 0x12a7f5},
+ {0x133, 0xa808},
+ {0x135, 0x12a810},
+ {0x149, 0x7a82c}
};
#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
@@ -274,32 +279,39 @@ const struct pctl_data pctl0_data[] = {
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
-const struct pctl_data pctl1_data[] = {
- {0x0, 0x39a000},
- {0x3b, 0x44a040},
- {0x81, 0x2a08d},
- {0x85, 0x6ba094},
- {0xf2, 0x18a100},
- {0x10c, 0x4a132},
- {0x112, 0xca141},
- {0x120, 0x2fa158},
- {0x151, 0x17a1d0},
- {0x16a, 0x1a1e9},
- {0x16d, 0x13a1ec},
- {0x182, 0x7a201},
- {0x18b, 0x3a20a},
- {0x190, 0x7a580},
- {0x199, 0xa590},
- {0x19b, 0x4a594},
- {0x1a1, 0x1a59c},
- {0x1a4, 0x7a82c},
- {0x1ad, 0xfa7cc},
- {0x1be, 0x17a7dd},
- {0x1d7, 0x12a810}
+static const struct pctl_data pctl1_data[] = {
+ {0x0, 0x39a000},
+ {0x3b, 0x44a040},
+ {0x81, 0x2a08d},
+ {0x85, 0x6ba094},
+ {0xf2, 0x18a100},
+ {0x10c, 0x4a132},
+ {0x112, 0xca141},
+ {0x120, 0x2fa158},
+ {0x151, 0x17a1d0},
+ {0x16a, 0x1a1e9},
+ {0x16d, 0x13a1ec},
+ {0x182, 0x7a201},
+ {0x18b, 0x3a20a},
+ {0x190, 0x7a580},
+ {0x199, 0xa590},
+ {0x19b, 0x4a594},
+ {0x1a1, 0x1a59c},
+ {0x1a4, 0x7a82c},
+ {0x1ad, 0xfa7cc},
+ {0x1be, 0x17a7dd},
+ {0x1d7, 0x12a810},
+ {0x1eb, 0x4000a7e1},
+ {0x1ec, 0x5000a7f5},
+ {0x1ed, 0x4000a7e2},
+ {0x1ee, 0x5000a7dc},
+ {0x1ef, 0x4000a7e3},
+ {0x1f0, 0x5000a7f6},
+ {0x1f1, 0x5000a7e4}
};
#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
-#define PCTL1_RENG_EXEC_END_PTR 0x1ea
+#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940c0ecd..5d38229baf69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
-extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index bde3ca3c21c1..2812d88a8bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
reg);
}
-static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
- enum idh_request req)
-{
- u32 reg;
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
- reg);
-
- xgpu_ai_mailbox_set_valid(adev, true);
-}
-
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
@@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
return r;
}
-
-static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
- enum idh_request req)
-{
+static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
int r;
- xgpu_ai_mailbox_trans_msg(adev, req);
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_ai_mailbox_set_valid(adev, true);
/* start to poll ack */
r = xgpu_ai_poll_ack(adev);
@@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
pr_err("Doesn't get ack from pf, continue\n");
xgpu_ai_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
/* start to check msg if request is idh_req_gpu_init_access */
if (req == IDH_REQ_GPU_INIT_ACCESS ||
@@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
+ .trans_msg = xgpu_ai_mailbox_trans_msg,
};
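This rework folds the transmit helper and the ack poll together and grows the message to four dwords, which is what lets the new .trans_msg hook carry VF error records (IDH_LOG_VF_ERROR) to the host. A simulated, self-contained sketch of the handshake; the register names, layout, and instant-ack host are all stand-ins for the real BIF_BX_PF0 mailbox:

#include <stdint.h>

enum { DW0, DW1, DW2, DW3, CTRL, NREGS };
#define MBOX_VALID	(1u << 0)
#define MBOX_ACK	(1u << 1)

static uint32_t mbox[NREGS];	/* simulated mailbox register bank */

static uint32_t mbox_read(int reg)
{
	if (reg == CTRL && (mbox[CTRL] & MBOX_VALID))
		mbox[CTRL] |= MBOX_ACK;	/* pretend the host acked */
	return mbox[reg];
}

static int mbox_trans_msg(uint32_t req, uint32_t d1, uint32_t d2, uint32_t d3)
{
	int timeout = 100;

	mbox[DW0] = req;		/* request in DW0, payload in DW1..DW3 */
	mbox[DW1] = d1;
	mbox[DW2] = d2;
	mbox[DW3] = d3;
	mbox[CTRL] |= MBOX_VALID;	/* tell the host the message is there */

	while (!(mbox_read(CTRL) & MBOX_ACK) && --timeout)
		;			/* poll for the host's ack */

	mbox[CTRL] &= ~MBOX_VALID;	/* drop valid again */
	return timeout ? 0 : -1;	/* -1: no ack from the host */
}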
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 9aefc44d2c34..1e91b9a1c591 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -31,7 +31,9 @@ enum idh_request {
IDH_REL_GPU_INIT_ACCESS,
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
- IDH_REQ_GPU_RESET_ACCESS
+ IDH_REQ_GPU_RESET_ACCESS,
+
+ IDH_LOG_VF_ERROR = 200,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 171a658135b5..c25a831f94ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
.req_full_gpu = xgpu_vi_request_full_gpu_access,
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
.reset_gpu = xgpu_vi_request_reset,
+ .trans_msg = NULL, /* Does not need to trans VF errors to host. */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index 2db741131bc6..c791d73d2d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -32,7 +32,9 @@ enum idh_request {
IDH_REL_GPU_INIT_ACCESS,
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
- IDH_REQ_GPU_RESET_ACCESS
+ IDH_REQ_GPU_RESET_ACCESS,
+
+ IDH_LOG_VF_ERROR = 200,
};
/* VI mailbox messages data */
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 1e272f785def..045988b18bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -32,6 +32,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CONFIG_CNTL 0x11180044
u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -67,7 +68,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
- WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
@@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
}
+
+void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
+ data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+}
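nbio_v6_1_init_registers() uses the usual "read, modify, write back only if changed" idiom, so an already-correct PCIE_CONFIG_CNTL costs no write. A sketch with a simulated register and illustrative field bits in place of the real masks:

#include <stdint.h>

static uint32_t pcie_cfg;	/* simulated smnPCIE_CONFIG_CNTL */

static void init_registers(void)
{
	uint32_t def, data;

	def = data = pcie_cfg;
	data |= 1u << 0;	/* illustrative stand-ins for the two */
	data |= 1u << 1;	/* CI_SWUS_MAX_READ_REQUEST_SIZE fields */

	if (def != data)	/* skip the write when nothing changed */
		pcie_cfg = data;
}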
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index f6f8bc045518..686e4b4d296a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool
void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
+void nbio_v6_1_init_registers(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index aa04632523fa..11b70d601922 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
{
- WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 2258323a3c26..f7cf994b1da2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -86,6 +86,52 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
return 0;
}
+int psp_v10_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct psp_firmware_header_v1_0 *hdr;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+ default: BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out;
+
+ hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)hdr +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+
+ return 0;
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ }
+
+ return err;
+}
+
int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
{
int ret;
@@ -110,7 +156,6 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
@@ -130,6 +175,16 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
+ return 0;
+}
+
+int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -143,13 +198,42 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
psp_ring_reg = ring_type;
psp_ring_reg = psp_ring_reg << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* There might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
/* Wait for response flag (bit 31) in C2PMSG_64 */
- psp_ring_reg = 0;
- while ((psp_ring_reg & 0x80000000) == 0) {
- psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64);
- }
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
- return 0;
+ return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ unsigned int psp_ring_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ /* Write the ring destroy command to C2PMSG_64 */
+ psp_ring_reg = 3 << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* There might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
}
int psp_v10_0_cmd_submit(struct psp_context *psp,
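The ring-create path above swaps an unbounded busy-wait on bit 31 of C2PMSG_64 for psp_wait_for() with a mask and an expected value, so a wedged PSP now fails with an error instead of hanging the CPU. A self-contained sketch of that primitive; the simulated register "completes" after a few polls, and the retry policy is illustrative:

#include <stdint.h>

static uint32_t fake_c2pmsg64;	/* simulated status register */

static uint32_t reg_read(void)
{
	static int polls;

	if (++polls == 3)
		fake_c2pmsg64 |= 0x80000000u;	/* response flag, bit 31 */
	return fake_c2pmsg64;
}

/* Bounded masked poll: succeed once (reg & mask) == expected. */
static int wait_for(uint32_t expected, uint32_t mask, int retries)
{
	while (retries--) {
		if ((reg_read() & mask) == expected)
			return 0;
	}
	return -110;	/* timed out, in the style of -ETIMEDOUT */
}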
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index 2022b7b7151e..e76cde2f01f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -27,10 +27,15 @@
#include "amdgpu_psp.h"
+extern int psp_v10_0_init_microcode(struct psp_context *psp);
extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
extern int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v10_0_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c98d77d0c8f8..2a535a4b8d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
-#if 0
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
-#endif
return ret;
}
@@ -341,10 +339,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
- if (ring->ring_mem)
- amdgpu_bo_free_kernel(&adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1d766ae98dc8..b1de44f22824 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -551,17 +551,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl;
+ u32 f32_cntl, phase_quantum = 0;
int i;
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
- if (enable)
+ if (enable) {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 1);
- else
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ ATC_L1_ENABLE, 1);
+ if (amdgpu_sdma_phase_quantum) {
+ WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ }
+ } else {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 0);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ ATC_L1_ENABLE, 1);
+ }
+
WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
}
}
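The phase-quantum encoding above is a tiny floating-point format: the requested cycle count is halved (rounding up) while the exponent field is bumped until the mantissa fits, and both fields are clamped at their maxima with a one-time warning. A compilable sketch; the field widths and shifts here are assumptions, not the real SDMA0_PHASE0_QUANTUM layout:

#include <stdint.h>

#define VALUE_SHIFT	8
#define VALUE_MAX	0xFFu	/* assumed VALUE field width */
#define UNIT_SHIFT	0
#define UNIT_MAX	0xFu	/* assumed UNIT field width */

/* Encode roughly (value << unit) clock cycles into the two fields. */
static uint32_t encode_phase_quantum(unsigned int value)
{
	unsigned int unit = 0;

	while (value > VALUE_MAX) {
		value = (value + 1) >> 1;	/* halve, rounding up */
		unit++;
	}
	if (unit > UNIT_MAX) {			/* clamp both fields */
		value = VALUE_MAX;
		unit = UNIT_MAX;
	}
	return value << VALUE_SHIFT | unit << UNIT_SHIFT;
}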
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4a65697ccc94..fd7c72aaafa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("Setting write pointer\n");
if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
@@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
- adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
+ WRITE_ONCE(*wb, (ring->wptr << 2));
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
@@ -493,13 +494,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl;
+ u32 f32_cntl, phase_quantum = 0;
int i;
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+ phase_quantum);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+ phase_quantum);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+ phase_quantum);
+ }
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
}
@@ -541,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
u32 temp;
+ u64 wptr_gpu_addr;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -628,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
}
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -655,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+
}
return 0;
@@ -751,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
- u32 digest_size = 0;
int i, j;
/* halt the MEs */
sdma_v4_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- uint16_t version_major;
- uint16_t version_minor;
if (!adev->sdma.instance[i].fw)
return -EINVAL;
@@ -767,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- version_major = le16_to_cpu(hdr->header.header_version_major);
- version_minor = le16_to_cpu(hdr->header.header_version_minor);
-
- if (version_major == 1 && version_minor >= 1) {
- const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
- digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
- }
-
- fw_size -= digest_size;
-
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
-
for (j = 0; j < fw_size; j++)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
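The wptr change earlier in this file matters because the engine (and, with the new poll registers, the hardware wptr poller) reads the shadow value from memory: two separate 32-bit stores can be observed torn across the halves. The patch writes both halves with a single 64-bit WRITE_ONCE; a sketch of the same guarantee using C11 atomics as a stand-in for the kernel macro:

#include <stdint.h>
#include <stdatomic.h>

struct ring_shadow {
	_Atomic uint64_t wptr_shadow;	/* polled by the SDMA engine */
};

static void ring_set_wptr(struct ring_shadow *s, uint64_t wptr)
{
	/* one relaxed 64-bit store: both halves become visible together */
	atomic_store_explicit(&s->wptr_shadow, wptr << 2,
			      memory_order_relaxed);
}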
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4267fa417997..8284d5dbfc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
return r;
}
+#define mmROM_INDEX 0x2A
+#define mmROM_DATA 0x2B
+
+static bool si_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes)
+{
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+ /* set rom index to 0 */
+ WREG32(mmROM_INDEX, 0);
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(mmROM_DATA);
+
+ return true;
+}
+
//xxx: not implemented
static int si_asic_reset(struct amdgpu_device *adev)
{
@@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
+ .read_bios_from_rom = &si_read_bios_from_rom,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
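si_read_bios_from_rom() streams the VBIOS out through an index/data register pair: write the start index once, then every read of the data port returns the next dword. A simulated sketch of the pattern; the fake ROM stands in for the mmROM_INDEX/mmROM_DATA MMIO, and the destination buffer must be padded to whole dwords, as the ALIGN in the real code implies:

#include <stdint.h>
#include <stdbool.h>

static uint32_t fake_rom[16];	/* stands in for the ROM behind mmROM_DATA */
static uint32_t rom_cursor;

static void rom_set_index(uint32_t idx)	{ rom_cursor = idx; }
static uint32_t rom_read_data(void)	{ return fake_rom[rom_cursor++]; }

static bool read_bios_from_rom(uint8_t *bios, uint32_t length_bytes)
{
	uint32_t *dw_ptr = (uint32_t *)bios;
	uint32_t i, length_dw;

	if (!bios || !length_bytes)
		return false;

	length_dw = (length_bytes + 3) / 4;	/* round up to dwords */
	rom_set_index(0);			/* start at the ROM base */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = rom_read_data();	/* data port auto-increments */
	return true;
}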
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index a7ad8390981c..d63873f3f574 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a7341d88a320..f2c3a49f73a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
-#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -62,8 +62,6 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"
-MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
-
#define mmFabricConfigAccessControl 0x0410
#define mmFabricConfigAccessControl_BASE_IDX 0
#define mmFabricConfigAccessControl_DEFAULT 0x00000000
@@ -198,6 +196,50 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
+static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+ r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+ return r;
+}
+
+static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+}
+
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
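All four helpers added here share one template: take the per-window spinlock with interrupts disabled, write the target offset into the block's IND_INDEX register, then read or write IND_DATA while the lock guarantees the index/data pair cannot be interleaved by another CPU. Condensed into one generic routine (parameter names are placeholders; the real code uses the WREG32_SOC15/RREG32_SOC15 accessors and per-block locks such as gc_cac_idx_lock):

    static u32 indirect_rreg(struct amdgpu_device *adev, spinlock_t *lock,
                             u32 index_reg, u32 data_reg, u32 reg)
    {
            unsigned long flags;
            u32 r;

            spin_lock_irqsave(lock, flags);
            WREG32(index_reg, reg);         /* select the indirect offset */
            r = RREG32(data_reg);           /* read through the window */
            spin_unlock_irqrestore(lock, flags);

            return r;
    }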
@@ -392,11 +434,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
- amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
soc15_gpu_pci_config_reset(adev);
- amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
}
@@ -524,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
return nbio_v6_1_get_rev_id(adev);
}
-
-int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- /* to be implemented in MC IP*/
- return 0;
-}
-
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -557,6 +592,10 @@ static int soc15_common_early_init(void *handle)
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
adev->didt_wreg = &soc15_didt_wreg;
+ adev->gc_cac_rreg = &soc15_gc_cac_rreg;
+ adev->gc_cac_wreg = &soc15_gc_cac_wreg;
+ adev->se_cac_rreg = &soc15_se_cac_rreg;
+ adev->se_cac_wreg = &soc15_se_cac_wreg;
adev->asic_funcs = &soc15_asic_funcs;
@@ -681,6 +720,9 @@ static int soc15_common_hw_init(void *handle)
soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
+ /* setup nbio registers */
+ if (!(adev->flags & AMD_IS_APU))
+ nbio_v6_1_init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index e2d330eed952..7a8e4e28abb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -77,6 +77,13 @@ struct nbio_pcie_index_data {
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
+ WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+ (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+ (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+ (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
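Both macros resolve a register's absolute offset at preprocessing time: reg##_BASE_IDX names which of the IP instance's five segment bases (SEG0..SEG4) the relative offset is added to, and the _NO_KIQ variant routes the final access around the KIQ indirection used for register writes under SR-IOV. Expanded by hand for a hypothetical register FOO whose FOO_BASE_IDX is 1, on instance 0 of the GC block:

    /* WREG32_SOC15_NO_KIQ(GC, 0, mmFOO, val) folds, after the nested
     * ternaries resolve, to a single direct write: */
    WREG32_NO_KIQ(GC_BASE__INST0_SEG1 + mmFOO, val);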
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e79befd80eed..7f408f85fdb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -250,6 +250,7 @@
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958368ac..23a85750edd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r) {
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
@@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+
+ /* currently only use the first encoding ring for
+ * sriov, so set unused locations for the other rings.
+ */
+ if (i == 0)
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ else
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
@@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+ WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
+ adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
+ adev->uvd.ring_enc[0].wptr = 0;
+ adev->uvd.ring_enc[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
@@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
return 0;
}
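Moving the doorbell and write-pointer resets ahead of step 5 matters because the MMSCH starts consuming the descriptor table as soon as the HOST register is written; scrubbing ring-0 state afterwards, as the deleted line at the end of the function did, could race with the engine. The whole handshake, reduced to a hedged sketch (the retry count, delay and success code are assumptions, since the actual poll body sits in the elided part of this hunk):

    WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);  /* 4: clear resp */
    /* ... reset ring-0 doorbell, writeback slot and wptr state ... */
    WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); /* 5: go */

    for (i = 0; i < 10; i++) {              /* 6: wait for the MMSCH */
            data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
            if (data == 1)                  /* assumed success code */
                    return 0;
            msleep(1);
    }
    return -EBUSY;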
@@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
ring = &adev->uvd.ring;
+ ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
- /* disable clock gating */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
- ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
0xFFFFFFFF, 0x00000004);
/* mc resume*/
@@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end*/
@@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
- /* disable byte swapping */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
-
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
@@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
- /* enable UMC */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
-
- /* boot up the VCPU */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
-
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
@@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
- /* set the write pointer delay */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
-
- /* set the wb address */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
- (upper_32_bits(ring->gpu_addr) >> 2));
-
- /* programm the RB_BASE for ring buffer */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
- lower_32_bits(ring->gpu_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
- upper_32_bits(ring->gpu_addr));
-
- ring->wptr = 0;
ring = &adev->uvd.ring_enc[0];
+ ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+ /* boot up the VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+
+ /* enable UMC */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
- return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb90c1f..11134d5f7443 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+ WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
+ adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
+ adev->vce.ring[0].wptr = 0;
+ adev->vce.ring[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
return 0;
}
@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
- 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;
-
- return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;
+
+ /* currently only use the first encoding ring for sriov,
+ * so set unused locations for the other rings.
+ */
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
- else if (i == 1)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
{
uint32_t val = 0;
- if (state == AMDGPU_IRQ_STATE_ENABLE)
- val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6cac291c96da..9ff69b90df36 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6316aad43a73..e4a8c2e52cb2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -142,12 +142,12 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -155,26 +155,26 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access read pointer\n");
+ pr_err("Can't access read pointer\n");
return -EFAULT;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access write pointer\n");
+ pr_err("Can't access write pointer\n");
return -EFAULT;
}
@@ -182,7 +182,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access eop buffer");
+ pr_debug("Can't access eop buffer");
return -EFAULT;
}
@@ -190,7 +190,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access ctx save restore buffer");
+ pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
}
@@ -219,27 +219,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
else
q_properties->format = KFD_QUEUE_FORMAT_PM4;
- pr_debug("Queue Percentage (%d, %d)\n",
+ pr_debug("Queue Percentage: %d, %d\n",
q_properties->queue_percent, args->queue_percentage);
- pr_debug("Queue Priority (%d, %d)\n",
+ pr_debug("Queue Priority: %d, %d\n",
q_properties->priority, args->queue_priority);
- pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+ pr_debug("Queue Address: 0x%llX, 0x%llX\n",
q_properties->queue_address, args->ring_base_address);
- pr_debug("Queue Size (0x%llX, %u)\n",
+ pr_debug("Queue Size: 0x%llX, %u\n",
q_properties->queue_size, args->ring_size);
- pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
- (uint64_t) q_properties->read_ptr,
- (uint64_t) q_properties->write_ptr);
+ pr_debug("Queue r/w Pointers: %p, %p\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
- pr_debug("Queue Format (%d)\n", q_properties->format);
+ pr_debug("Queue Format: %d\n", q_properties->format);
- pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
+ pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
- pr_debug("Queue CTX save arex (0x%llX)\n",
+ pr_debug("Queue CTX save area: 0x%llX\n",
q_properties->ctx_save_restore_area_address);
return 0;
@@ -257,16 +257,16 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- pr_debug("kfd: creating queue ioctl\n");
+ pr_debug("Creating queue ioctl\n");
err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
+ pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL) {
- pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
+ if (!dev) {
+ pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
return -EINVAL;
}
@@ -278,7 +278,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+ pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -296,15 +296,15 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
+ pr_debug("Queue id %d was created successfully\n", args->queue_id);
- pr_debug("ring buffer address == 0x%016llX\n",
+ pr_debug("Ring buffer address == 0x%016llX\n",
args->ring_base_address);
- pr_debug("read ptr address == 0x%016llX\n",
+ pr_debug("Read ptr address == 0x%016llX\n",
args->read_pointer_address);
- pr_debug("write ptr address == 0x%016llX\n",
+ pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
return 0;
@@ -321,7 +321,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("kfd: destroying queue id %d for PASID %d\n",
+ pr_debug("Destroying queue id %d for pasid %d\n",
args->queue_id,
p->pasid);
@@ -341,12 +341,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
struct queue_properties properties;
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -354,12 +354,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
@@ -368,7 +368,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("kfd: updating queue id %d for PASID %d\n",
+ pr_debug("Updating queue id %d for pasid %d\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -400,7 +400,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
}
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -443,7 +443,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
long status = 0;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -460,12 +460,11 @@ static int kfd_ioctl_dbg_register(struct file *filep,
*/
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- mutex_unlock(&p->mutex);
- mutex_unlock(kfd_get_dbgmgr_mutex());
- return PTR_ERR(pdd);
+ status = PTR_ERR(pdd);
+ goto out;
}
- if (dev->dbgmgr == NULL) {
+ if (!dev->dbgmgr) {
/* In case of a legal call, we have no dbgmgr yet */
create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
if (create_ok) {
@@ -480,6 +479,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
status = -EINVAL;
}
+out:
mutex_unlock(&p->mutex);
mutex_unlock(kfd_get_dbgmgr_mutex());
@@ -494,7 +494,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
long status;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -505,7 +505,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
mutex_lock(kfd_get_dbgmgr_mutex());
status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
- if (status == 0) {
+ if (!status) {
kfd_dbgmgr_destroy(dev->dbgmgr);
dev->dbgmgr = NULL;
}
@@ -539,7 +539,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -580,8 +580,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
watch_mask_value = (uint64_t) args_buff[args_idx];
@@ -604,8 +604,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
}
if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
/* Currently HSA Event is not supported for DBG */
@@ -617,6 +617,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
mutex_unlock(kfd_get_dbgmgr_mutex());
+out:
kfree(args_buff);
return status;
@@ -646,7 +647,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
sizeof(wac_info.trapId);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -782,8 +783,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
"scratch_limit %llX\n", pdd->scratch_limit);
args->num_of_nodes++;
- } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
@@ -846,9 +848,84 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
return err;
}
+static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_set_scratch_backing_va_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto bind_process_to_device_fail;
+ }
+
+ pdd->qpd.sh_hidden_private_base = args->va_addr;
+
+ mutex_unlock(&p->mutex);
+
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0)
+ dev->kfd2kgd->set_scratch_backing_va(
+ dev->kgd, args->va_addr, pdd->qpd.vmid);
+
+ return 0;
+
+bind_process_to_device_fail:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_get_tile_config(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_tile_config_args *args = data;
+ struct kfd_dev *dev;
+ struct tile_config config;
+ int err = 0;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+ dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+
+ args->gb_addr_config = config.gb_addr_config;
+ args->num_banks = config.num_banks;
+ args->num_ranks = config.num_ranks;
+
+ if (args->num_tile_configs > config.num_tile_configs)
+ args->num_tile_configs = config.num_tile_configs;
+ err = copy_to_user((void __user *)args->tile_config_ptr,
+ config.tile_config_ptr,
+ args->num_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ if (args->num_macro_tile_configs > config.num_macro_tile_configs)
+ args->num_macro_tile_configs =
+ config.num_macro_tile_configs;
+ err = copy_to_user((void __user *)args->macro_tile_config_ptr,
+ config.macro_tile_config_ptr,
+ args->num_macro_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_macro_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ return 0;
+}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
- [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+ .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
@@ -899,6 +976,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
kfd_ioctl_dbg_wave_control, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
+ kfd_ioctl_set_scratch_backing_va, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+ kfd_ioctl_get_tile_config, 0)
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
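From user space the two new entries are reached through the usual /dev/kfd ioctl path. A hypothetical snippet exercising AMDKFD_IOC_GET_TILE_CONFIG (the arg-struct field names mirror the args-> accesses in the handler above; the buffer sizes are guesses, which is safe because the kernel clamps num_* to what it actually copies):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "kfd_ioctl.h"          /* uapi header carrying these ioctls */

    static int query_tile_config(int kfd_fd, uint32_t gpu_id)
    {
            uint32_t tile[32], macro_tile[16];
            struct kfd_ioctl_get_tile_config_args args = {
                    .gpu_id = gpu_id,
                    .tile_config_ptr = (uintptr_t)tile,
                    .num_tile_configs = 32,
                    .macro_tile_config_ptr = (uintptr_t)macro_tile,
                    .num_macro_tile_configs = 16,
            };

            if (ioctl(kfd_fd, AMDKFD_IOC_GET_TILE_CONFIG, &args))
                    return -1;
            return (int)args.num_tile_configs;  /* entries actually written */
    }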
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d5e19b5fbbfb..0aa021aa0aa1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -42,8 +42,6 @@
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
- BUG_ON(!dev || !dev->kfd2kgd);
-
dev->kfd2kgd->address_watch_disable(dev->kgd);
}
@@ -62,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
unsigned int *ib_packet_buff;
int status;
- BUG_ON(!dbgdev || !dbgdev->kq || !packet_buff || !size_in_bytes);
+ if (WARN_ON(!size_in_bytes))
+ return -EINVAL;
kq = dbgdev->kq;
@@ -77,8 +76,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kq->ops.acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
- if (status != 0) {
- pr_err("amdkfd: acquire_packet_buffer failed\n");
+ if (status) {
+ pr_err("acquire_packet_buffer failed\n");
return status;
}
@@ -115,8 +114,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
&mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
kq->ops.rollback_packet(kq);
return status;
}
@@ -168,8 +167,6 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev);
-
/*
* no action is needed in this case,
* just make sure diq will not be used
@@ -187,14 +184,12 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
struct kernel_queue *kq = NULL;
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->dev);
-
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
&properties, 0, KFD_QUEUE_TYPE_DIQ,
&qid);
if (status) {
- pr_err("amdkfd: Failed to create DIQ\n");
+ pr_err("Failed to create DIQ\n");
return status;
}
@@ -202,8 +197,8 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
- if (kq == NULL) {
- pr_err("amdkfd: Error getting DIQ\n");
+ if (!kq) {
+ pr_err("Error getting DIQ\n");
pqm_destroy_queue(dbgdev->pqm, qid);
return -EFAULT;
}
@@ -215,8 +210,6 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev || !dbgdev->dev);
-
/* disable watch address */
dbgdev_address_watch_disable_nodiq(dbgdev->dev);
return 0;
@@ -227,8 +220,6 @@ static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
/* todo - disable address watch */
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->kq);
-
status = pqm_destroy_queue(dbgdev->pqm,
dbgdev->kq->queue->properties.queue_id);
dbgdev->kq = NULL;
@@ -245,14 +236,12 @@ static void dbgdev_address_watch_set_registers(
{
union ULARGE_INTEGER addr;
- BUG_ON(!adw_info || !addrHi || !addrLo || !cntl);
-
addr.quad_part = 0;
addrHi->u32All = 0;
addrLo->u32All = 0;
cntl->u32All = 0;
- if (adw_info->watch_mask != NULL)
+ if (adw_info->watch_mask)
cntl->bitfields.mask =
(uint32_t) (adw_info->watch_mask[index] &
ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
@@ -279,7 +268,7 @@ static void dbgdev_address_watch_set_registers(
}
static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
union TCP_WATCH_ADDR_H_BITS addrHi;
union TCP_WATCH_ADDR_L_BITS addrLo;
@@ -287,13 +276,11 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
struct kfd_process_device *pdd;
unsigned int i;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
/* taking the vmid for that process on the safe way using pdd */
pdd = kfd_get_process_device_data(dbgdev->dev,
adw_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
@@ -303,17 +290,16 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((adw_info->watch_mode == NULL) ||
- (adw_info->watch_address == NULL)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
- for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+ for (i = 0; i < adw_info->num_watch_points; i++) {
dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
&cntl, i, pdd->qpd.vmid);
@@ -348,7 +334,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
}
static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
struct pm4__set_config_reg *packets_vec;
union TCP_WATCH_ADDR_H_BITS addrHi;
@@ -363,28 +349,25 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
/* we do not control the vmid in DIQ mode, just a place holder */
unsigned int vmid = 0;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
addrHi.u32All = 0;
addrLo.u32All = 0;
cntl.u32All = 0;
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((NULL == adw_info->watch_mode) ||
- (NULL == adw_info->watch_address)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -442,8 +425,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[0].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
@@ -455,8 +436,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_HI);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[1].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[1].reg_data[0] = addrHi.u32All;
@@ -467,8 +446,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_LO);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[2].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[2].reg_data[0] = addrLo.u32All;
@@ -485,8 +462,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[3].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[3].reg_data[0] = cntl.u32All;
@@ -498,8 +473,8 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0) {
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status) {
+ pr_err("Failed to submit IB to DIQ\n");
break;
}
}
@@ -518,8 +493,6 @@ static int dbgdev_wave_control_set_registers(
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
- BUG_ON(!wac_info || !in_reg_sq_cmd || !in_reg_gfx_index);
-
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
@@ -620,18 +593,16 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
struct pm4__set_config_reg *packets_vec;
size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
- BUG_ON(!dbgdev || !wac_info);
-
reg_sq_cmd.u32All = 0;
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
- /* we do not control the VMID in DIQ,so reset it to a known value */
+ /* we do not control the VMID in DIQ, so reset it to a known value */
reg_sq_cmd.bits.vm_id = 0;
pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
@@ -667,7 +638,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -719,8 +690,8 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0)
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status)
+ pr_err("Failed to submit IB to DIQ\n");
kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -735,21 +706,19 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
- BUG_ON(!dbgdev || !dbgdev->dev || !wac_info);
-
reg_sq_cmd.u32All = 0;
/* taking the VMID for that process on the safe way using PDD */
pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
@@ -818,12 +787,13 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
- * to check which VMID the current process is mapped to. */
+ * to check which VMID the current process is mapped to.
+ */
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
(dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
+ if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
(dev->kgd, vmid) == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
vmid, p->pasid);
@@ -833,7 +803,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
}
if (vmid > last_vmid_to_scan) {
- pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+ pr_err("Didn't find vmid for pasid %d\n", p->pasid);
return -EFAULT;
}
@@ -860,8 +830,6 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type)
{
- BUG_ON(!pdbgdev || !pdev);
-
pdbgdev->dev = pdev;
pdbgdev->kq = NULL;
pdbgdev->type = type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 56d676396342..3da25f7bda6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -44,8 +44,6 @@ struct mutex *kfd_get_dbgmgr_mutex(void)
static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
{
- BUG_ON(!pmgr);
-
kfree(pmgr->dbgdev);
pmgr->dbgdev = NULL;
@@ -55,7 +53,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
{
- if (pmgr != NULL) {
+ if (pmgr) {
kfd_dbgmgr_uninitialize(pmgr);
kfree(pmgr);
}
@@ -66,12 +64,12 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff;
- BUG_ON(pdev == NULL);
- BUG_ON(!pdev->init_complete);
+ if (WARN_ON(!pdev->init_complete))
+ return false;
new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) {
- pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+ pr_err("Failed to allocate dbgmgr instance\n");
return false;
}
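This hunk shows the conversion applied throughout the series: a BUG_ON, which panics the whole machine on a driver-internal inconsistency, becomes a WARN_ON that logs one stack trace and lets the single operation fail. The shape of the change:

    /* before: any inconsistency is fatal for the box */
    BUG_ON(!pdev->init_complete);

    /* after: complain loudly, then fail just this call */
    if (WARN_ON(!pdev->init_complete))
            return false;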
@@ -79,7 +77,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
new_buff->dev = pdev;
new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
if (!new_buff->dbgdev) {
- pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+ pr_err("Failed to allocate dbgdev instance\n");
kfree(new_buff);
return false;
}
@@ -96,8 +94,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
if (pmgr->pasid != 0) {
pr_debug("H/W debugger is already active using pasid %d\n",
pmgr->pasid);
@@ -118,8 +114,6 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
pr_debug("H/W debugger is not registered by calling pasid %d\n",
@@ -137,8 +131,6 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
struct dbg_wave_control_info *wac_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
@@ -152,9 +144,6 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
struct dbg_address_watch_info *adw_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !adw_info);
-
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
index 257a745ad0b5..a04a1fe1d0d9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -30,13 +30,11 @@
#pragma pack(push, 4)
enum HSA_DBG_WAVEOP {
- HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
- HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
- HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
- HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter
- debug mode */
- HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
- a trap */
+ HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
+ HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
+ HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
+ HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */
+ HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */
HSA_DBG_NUM_WAVEOP = 5,
HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
};
@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
uint32_t UserData:8; /* user data */
uint32_t ShaderArray:1; /* Shader array */
uint32_t Priv:1; /* Privileged */
- uint32_t Reserved0:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved0:4; /* Reserved, should be 0 */
uint32_t WaveId:4; /* wave id */
uint32_t SIMD:2; /* SIMD id */
uint32_t HSACU:4; /* Compute unit */
uint32_t ShaderEngine:2;/* Shader engine */
uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
- uint32_t Reserved1:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved1:4; /* Reserved, should be 0 */
} ui32;
uint32_t Value;
};
@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
* in the user mode instruction stream. The OS scheduler event is typically
* associated and signaled by an interrupt issued by the GPU, but other HSA
* system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too. */
+ * by the KFD by this mechanism, too.
+ */
/* these are the new definitions for events */
enum HSA_EVENTTYPE {
HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
- (start/stop) */
+ * (start/stop)
+ */
HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
- (EOP pm4) */
+ * (EOP pm4)
+ */
/* ... */
HSA_EVENTTYPE_MAXID,
HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3f95f7cb4019..61fff25b4ce7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
#define MQD_SIZE_ALIGNED 768
@@ -98,11 +98,14 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
if (supported_devices[i].did == did) {
- BUG_ON(supported_devices[i].device_info == NULL);
+ WARN_ON(!supported_devices[i].device_info);
return supported_devices[i].device_info;
}
}
+ dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
+ did);
+
return NULL;
}
@@ -114,8 +117,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
const struct kfd_device_info *device_info =
lookup_device_info(pdev->device);
- if (!device_info)
+ if (!device_info) {
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
return NULL;
+ }
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
@@ -152,15 +157,16 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
}
if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
- dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+ dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+ != 0);
return false;
}
pasid_limit = min_t(unsigned int,
- (unsigned int)1 << kfd->device_info->max_pasid_bits,
+ (unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
/*
* last pasid is used for kernel queues doorbells
@@ -211,9 +217,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
flags);
dev = kfd_device_by_pci_dev(pdev);
- BUG_ON(dev == NULL);
-
- kfd_signal_iommu_event(dev, pasid, address,
+ if (!WARN_ON(!dev))
+ kfd_signal_iommu_event(dev, pasid, address,
flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
return AMD_IOMMU_INV_PRI_RSP_INVALID;
@@ -234,9 +239,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
- size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
- max_num_of_queues_per_device *
- sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+ size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
+ max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ + sizeof(struct pm4_mes_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
@@ -247,42 +252,37 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
- dev_err(kfd_device,
- "Could not allocate %d bytes for device (%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
- dev_info(kfd_device,
- "Allocated %d bytes on gart for device(%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
- dev_err(kfd_device,
- "Error initializing gtt sub-allocator\n");
+ dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
- kfd_doorbell_init(kfd);
-
- if (kfd_topology_add_device(kfd) != 0) {
+ if (kfd_doorbell_init(kfd)) {
dev_err(kfd_device,
- "Error adding device (%x:%x) to topology\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ "Error initializing doorbell aperture\n");
+ goto kfd_doorbell_error;
+ }
+
+ if (kfd_topology_add_device(kfd)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
}
if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
}
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
- "Error initializing iommuv2 for device (%x:%x)\n",
+ "Error initializing iommuv2 for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto device_iommu_pasid_error;
}
@@ -292,15 +292,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
- dev_err(kfd_device,
- "Error initializing queue manager for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
- if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+ if (kfd->dqm->ops.start(kfd->dqm)) {
dev_err(kfd_device,
- "Error starting queuen manager for device (%x:%x)\n",
+ "Error starting queue manager for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto dqm_start_error;
}
@@ -308,10 +306,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dbgmgr = NULL;
kfd->init_complete = true;
- dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+ dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
kfd->pdev->device);
- pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+ pr_debug("Starting kfd with the following scheduling policy %d\n",
sched_policy);
goto out;
@@ -325,11 +323,13 @@ device_iommu_pasid_error:
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
+ kfd_doorbell_fini(kfd);
+kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
- "device (%x:%x) NOT added due to errors\n",
+ "device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
out:
return kfd->init_complete;
@@ -342,6 +342,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
+ kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
@@ -351,8 +352,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
- BUG_ON(kfd == NULL);
-
if (kfd->init_complete) {
kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
@@ -366,14 +365,15 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
unsigned int pasid_limit;
int err;
- BUG_ON(kfd == NULL);
-
pasid_limit = kfd_get_pasid_limit();
if (kfd->init_complete) {
err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0)
+ if (err < 0) {
+ dev_err(kfd_device, "failed to initialize iommu\n");
return -ENXIO;
+ }
+
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
@@ -402,26 +402,27 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
- unsigned int num_of_bits;
+ unsigned int num_of_longs;
- BUG_ON(!kfd);
- BUG_ON(!kfd->gtt_mem);
- BUG_ON(buf_size < chunk_size);
- BUG_ON(buf_size == 0);
- BUG_ON(chunk_size == 0);
+ if (WARN_ON(buf_size < chunk_size))
+ return -EINVAL;
+ if (WARN_ON(buf_size == 0))
+ return -EINVAL;
+ if (WARN_ON(chunk_size == 0))
+ return -EINVAL;
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
- num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
- BUG_ON(num_of_bits == 0);
+ num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
+ BITS_PER_LONG;
- kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+ kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
- pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+ pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
mutex_init(&kfd->gtt_sa_lock);
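The old sizing divided the chunk count by BITS_PER_BYTE, which under-allocates whenever the count is not a multiple of eight and reaches zero (tripping the deleted BUG_ON) below eight chunks; the new expression is the standard round-up to whole longs. Worked through for a hypothetical pool of 1000 chunks on a 64-bit kernel:

    num_of_longs = (1000 + 64 - 1) / 64;    /* = 16 longs = 1024 bits */
    /* kcalloc(16, sizeof(long), GFP_KERNEL) -> 128 zeroed bytes, enough
     * for set_bit()/find_next_zero_bit() across all 1000 chunks */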
@@ -455,8 +456,6 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
{
unsigned int found, start_search, cur_size;
- BUG_ON(!kfd);
-
if (size == 0)
return -EINVAL;
@@ -467,7 +466,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if ((*mem_obj) == NULL)
return -ENOMEM;
- pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+ pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
@@ -479,7 +478,7 @@ kfd_gtt_restart_search:
kfd->gtt_sa_num_of_chunks,
start_search);
- pr_debug("kfd: found = %d\n", found);
+ pr_debug("Found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
@@ -497,12 +496,12 @@ kfd_gtt_restart_search:
found,
kfd->gtt_sa_chunk_size);
- pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+ pr_debug("gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
- pr_debug("kfd: single bit\n");
+ pr_debug("Single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
@@ -537,7 +536,7 @@ kfd_gtt_restart_search:
} while (cur_size > 0);
- pr_debug("kfd: range_start = %d, range_end = %d\n",
+ pr_debug("range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
@@ -551,7 +550,7 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return -ENOMEM;
@@ -561,13 +560,11 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
unsigned int bit;
- BUG_ON(!kfd);
-
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
- pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+ pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
mem_obj, mem_obj->range_start, mem_obj->range_end);
mutex_lock(&kfd->gtt_sa_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 602769ced3bd..53a66e821624 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -79,20 +79,17 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_pipe_per_mec;
}
@@ -121,7 +118,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* Kaveri kfd vmid's starts from vmid 8 */
allocated_vmid = bit + KFD_VMID_START_OFFSET;
- pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+ pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -152,42 +149,38 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
-
- pr_debug("kfd: In func %s\n", __func__);
print_queue(q);
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
- return -EPERM;
+ retval = -EPERM;
+ goto out_unlock;
}
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
- if (retval != 0) {
- mutex_unlock(&dqm->lock);
- return retval;
- }
+ if (retval)
+ goto out_unlock;
}
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
retval = create_sdma_queue_nocpsch(dqm, q, qpd);
+ else
+ retval = -EINVAL;
- if (retval != 0) {
+ if (retval) {
if (list_empty(&qpd->queues_list)) {
deallocate_vmid(dqm, qpd, q);
*allocated_vmid = 0;
}
- mutex_unlock(&dqm->lock);
- return retval;
+ goto out_unlock;
}
list_add(&q->list, &qpd->queues_list);
@@ -205,8 +198,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
+out_unlock:
mutex_unlock(&dqm->lock);
- return 0;
+ return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
@@ -216,7 +210,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
set = false;
- for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+ for (pipe = dqm->next_pipe_to_allocate, i = 0;
+ i < get_pipes_per_mec(dqm);
pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
if (!is_pipe_enabled(dqm, 0, pipe))
@@ -239,8 +234,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
if (!set)
return -EBUSY;
- pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
- __func__, q->pipe, q->queue);
+ pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
/* horizontal hqd allocation */
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
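/*
 * Editor's sketch, not part of the patch: allocate_hqd() above walks the
 * pipes round-robin, starting at dqm->next_pipe_to_allocate, and claims the
 * first free queue bit on an enabled pipe. A minimal standalone model of
 * that scan (hypothetical npipes/free_mask inputs):
 */
static int pick_pipe_round_robin(unsigned int start, unsigned int npipes,
				 const unsigned int *free_mask)
{
	unsigned int pipe, i;

	for (pipe = start, i = 0; i < npipes;
	     pipe = (pipe + 1) % npipes, ++i)
		if (free_mask[pipe])	/* any queue bit still set? */
			return pipe;	/* caller then advances next_pipe */
	return -1;			/* every pipe exhausted: -EBUSY */
}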
@@ -260,36 +254,38 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL)
+ if (!mqd)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
- if (retval != 0)
+ if (retval)
return retval;
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_hqd;
- pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
- q->pipe,
- q->queue);
+ pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+ q->pipe, q->queue);
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
- q->queue, (uint32_t __user *) q->properties.write_ptr);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ dqm->dev->kfd2kgd->set_scratch_backing_va(
+ dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_hqd:
+ deallocate_hqd(dqm, q);
+
+ return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
@@ -299,12 +295,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !q->mqd || !qpd);
-
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -323,7 +315,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
- pr_debug("q->properties.type is invalid (%d)\n",
+ pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
retval = -EINVAL;
goto out;
@@ -334,7 +326,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
q->pipe, q->queue);
- if (retval != 0)
+ if (retval)
goto out;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -364,14 +356,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
struct mqd_manager *mqd;
bool prev_active = false;
- BUG_ON(!dqm || !q || !q->mqd);
-
mutex_lock(&dqm->lock);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_unlock;
}
if (q->properties.is_active)
@@ -385,12 +375,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if ((q->properties.is_active) && (!prev_active))
dqm->queue_count++;
- else if ((!q->properties.is_active) && (prev_active))
+ else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = execute_queues_cpsch(dqm, false);
+out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
@@ -400,15 +391,16 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
{
struct mqd_manager *mqd;
- BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+ pr_debug("mqd type %d\n", type);
mqd = dqm->mqds[type];
if (!mqd) {
mqd = mqd_manager_init(type, dqm->dev);
- if (mqd == NULL)
- pr_err("kfd: mqd manager is NULL");
+ if (!mqd)
+ pr_err("mqd manager is NULL");
dqm->mqds[type] = mqd;
}
@@ -421,11 +413,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *n;
int retval;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
@@ -449,10 +437,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
int retval;
struct device_process_node *cur, *next;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("In func %s\n", __func__);
-
pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
@@ -493,51 +477,39 @@ static void init_interrupts(struct device_queue_manager *dqm)
{
unsigned int i;
- BUG_ON(dqm == NULL);
-
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
-static int init_scheduler(struct device_queue_manager *dqm)
-{
- int retval = 0;
-
- BUG_ON(!dqm);
-
- pr_debug("kfd: In %s\n", __func__);
-
- return retval;
-}
-
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
- int i;
+ int pipe, queue;
- BUG_ON(!dqm);
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!dqm->allocated_queues)
+ return -ENOMEM;
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
- dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
- sizeof(unsigned int), GFP_KERNEL);
- if (!dqm->allocated_queues) {
- mutex_destroy(&dqm->lock);
- return -ENOMEM;
- }
- for (i = 0; i < get_pipes_per_mec(dqm); i++)
- dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
+ if (test_bit(pipe_offset + queue,
+ dqm->dev->shared_resources.queue_bitmap))
+ dqm->allocated_queues[pipe] |= 1 << queue;
+ }
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
- init_scheduler(dqm);
return 0;
}
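/*
 * Editor's sketch, not part of the patch: the new initialize_nocpsch() loop
 * above folds the flat shared_resources.queue_bitmap into one free-queue
 * mask per pipe, so only queues actually granted to KFD become allocatable
 * (the old code blindly marked every queue free). An open-coded standalone
 * equivalent, with hypothetical bitmap/geometry inputs:
 */
static unsigned int pipe_queue_mask(const unsigned long *bitmap,
				    unsigned int pipe,
				    unsigned int queues_per_pipe)
{
	unsigned int queue, pipe_offset = pipe * queues_per_pipe;
	unsigned int mask = 0;

	for (queue = 0; queue < queues_per_pipe; queue++)
		/* open-coded test_bit(pipe_offset + queue, bitmap) */
		if (bitmap[(pipe_offset + queue) / BITS_PER_LONG] &
		    (1UL << ((pipe_offset + queue) % BITS_PER_LONG)))
			mask |= 1u << queue;
	return mask;
}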
@@ -545,9 +517,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
int i;
- BUG_ON(!dqm);
-
- BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -604,33 +574,34 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
- if (retval != 0)
+ if (retval)
return retval;
q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
- pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
- pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
- pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+ pr_debug("SDMA id is: %d\n", q->sdma_id);
+ pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_sdma_queue;
- retval = mqd->load_mqd(mqd, q->mqd, 0,
- 0, NULL);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_sdma_queue:
+ deallocate_sdma_queue(dqm, q->sdma_id);
+
+ return retval;
}
/*
@@ -642,10 +613,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s\n", __func__);
-
res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
res.vmid_mask <<= KFD_VMID_START_OFFSET;
@@ -663,8 +630,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
- * definition of res.queue_mask needs updating */
- if (WARN_ON(i > (sizeof(res.queue_mask)*8))) {
+ * definition of res.queue_mask needs updating
+ */
+ if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
pr_err("Invalid queue enabled by amdgpu: %d\n", i);
break;
}
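/*
 * Editor's note: the s/>/>=/ change above is an off-by-one fix. queue_mask
 * is 64 bits wide (it is printed with %llX below), so
 * sizeof(res.queue_mask) * 8 == 64 and the valid bit indices are 0..63;
 * i == 64 must already be rejected, because the mask update that follows
 * (not shown in this hunk),
 *
 *	res.queue_mask |= 1ull << i;
 *
 * is undefined behaviour for a shift count of 64 or more.
 */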
@@ -674,9 +642,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
res.gws_mask = res.oac_mask = res.gds_heap_base =
res.gds_heap_size = 0;
- pr_debug("kfd: scheduling resources:\n"
- " vmid mask: 0x%8X\n"
- " queue mask: 0x%8llX\n",
+ pr_debug("Scheduling resources:\n"
+ "vmid mask: 0x%8X\n"
+ "queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);
return pm_send_set_resources(&dqm->packets, &res);
@@ -686,10 +654,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
int retval;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
@@ -697,13 +662,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
retval = dqm->ops_asic_specific.initialize(dqm);
- if (retval != 0)
- goto fail_init_pipelines;
-
- return 0;
+ if (retval)
+ mutex_destroy(&dqm->lock);
-fail_init_pipelines:
- mutex_destroy(&dqm->lock);
return retval;
}
@@ -712,25 +673,23 @@ static int start_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
int retval;
- BUG_ON(!dqm);
-
retval = 0;
retval = pm_init(&dqm->packets, dqm);
- if (retval != 0)
+ if (retval)
goto fail_packet_manager_init;
retval = set_sched_resources(dqm);
- if (retval != 0)
+ if (retval)
goto fail_set_sched_resources;
- pr_debug("kfd: allocating fence memory\n");
+ pr_debug("Allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
&dqm->fence_mem);
- if (retval != 0)
+ if (retval)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
@@ -758,8 +717,6 @@ static int stop_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
destroy_queues_cpsch(dqm, true, true);
list_for_each_entry(node, &dqm->queues, list) {
@@ -776,13 +733,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
@@ -809,10 +762,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq);
-
- pr_debug("kfd: In %s\n", __func__);
-
mutex_lock(&dqm->lock);
/* here we actually preempt the DIQ */
destroy_queues_cpsch(dqm, true, false);
@@ -844,8 +793,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
retval = 0;
if (allocate_vmid)
@@ -854,7 +801,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
@@ -866,15 +813,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
}
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0)
+ if (retval)
goto out;
list_add(&q->list, &qpd->queues_list);
@@ -884,7 +831,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
+ dqm->sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -903,12 +850,11 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
- BUG_ON(!fence_addr);
timeout += jiffies;
while (*fence_addr != fence_value) {
if (time_after(jiffies, timeout)) {
- pr_err("kfd: qcm fence wait loop timeout expired\n");
+ pr_err("qcm fence wait loop timeout expired\n");
return -ETIME;
}
schedule();
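/*
 * Editor's note: the polling loop above compares with time_after() rather
 * than a plain "jiffies > timeout" because jiffies wraps around; in essence
 * (see <linux/jiffies.h>, which also adds type checking) the macro
 * subtracts in signed arithmetic so the test stays correct across the wrap:
 *
 *	#define time_after(a, b)	((long)((b) - (a)) < 0)
 */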
@@ -932,8 +878,6 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_preempt_type_filter preempt_type;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
retval = 0;
if (lock)
@@ -941,7 +885,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
goto out;
- pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+ pr_debug("Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
@@ -955,7 +899,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
preempt_type, 0, false, 0);
- if (retval != 0)
+ if (retval)
goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
@@ -964,7 +908,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
- if (retval != 0) {
+ if (retval) {
pdd = kfd_get_process_device_data(dqm->dev,
kfd_get_process(current));
pdd->reset_wavefronts = true;
@@ -983,14 +927,12 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
- BUG_ON(!dqm);
-
if (lock)
mutex_lock(&dqm->lock);
retval = destroy_queues_cpsch(dqm, false, false);
- if (retval != 0) {
- pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
goto out;
}
@@ -1005,8 +947,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
}
retval = pm_send_runlist(&dqm->packets, &dqm->queues);
- if (retval != 0) {
- pr_err("kfd: failed to execute runlist");
+ if (retval) {
+ pr_err("failed to execute runlist");
goto out;
}
dqm->active_runlist = true;
@@ -1025,8 +967,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd;
bool preempt_all_queues;
- BUG_ON(!dqm || !qpd || !q);
-
preempt_all_queues = false;
retval = 0;
@@ -1098,8 +1038,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
{
bool retval;
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (alternate_aperture_size == 0) {
@@ -1120,14 +1058,11 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
uint64_t base = (uintptr_t)alternate_aperture_base;
uint64_t limit = base + alternate_aperture_size - 1;
- if (limit <= base)
- goto out;
-
- if ((base & APE1_FIXED_BITS_MASK) != 0)
- goto out;
-
- if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
goto out;
+ }
qpd->sh_mem_ape1_base = base >> 16;
qpd->sh_mem_ape1_limit = limit >> 16;
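/*
 * Editor's sketch, assumptions flagged: the combined APE1 checks above
 * require a non-empty aperture whose base and limit sit on the 64 KiB
 * granularity implied by the ">> 16" programming of sh_mem_ape1_base/limit.
 * With a hypothetical base = 0x100000000 and size = 0x10000:
 *
 *	limit = base + size - 1;		 0x10000FFFF
 *	qpd->sh_mem_ape1_base  = base  >> 16;	 0x10000
 *	qpd->sh_mem_ape1_limit = limit >> 16;	 0x10000
 */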
@@ -1144,27 +1079,22 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
- pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+ pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
qpd->sh_mem_config, qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit);
- mutex_unlock(&dqm->lock);
- return retval;
-
out:
mutex_unlock(&dqm->lock);
- return false;
+ return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
struct device_queue_manager *dqm;
- BUG_ON(!dev);
+ pr_debug("Loading device queue manager\n");
- pr_debug("kfd: loading device queue manager\n");
-
- dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+ dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
if (!dqm)
return NULL;
@@ -1202,8 +1132,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
- BUG();
- break;
+ pr_err("Invalid scheduling policy %d\n", sched_policy);
+ goto out_free;
}
switch (dev->device_info->asic_family) {
@@ -1216,18 +1146,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
break;
}
- if (dqm->ops.initialize(dqm) != 0) {
- kfree(dqm);
- return NULL;
- }
+ if (!dqm->ops.initialize(dqm))
+ return dqm;
- return dqm;
+out_free:
+ kfree(dqm);
+ return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
dqm->ops.uninitialize(dqm);
kfree(dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 48dc0561b402..72c3cbabc0a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -24,6 +24,7 @@
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h"
+#include "gca/gfx_7_2_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -65,7 +66,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
@@ -104,8 +105,6 @@ static int register_process_cik(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -125,9 +124,10 @@ static int register_process_cik(struct device_queue_manager *dqm,
} else {
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 7e9cae9d349b..40e9ddd096cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return top_address_nybble << 12 |
@@ -110,8 +110,6 @@ static int register_process_vi(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -137,9 +135,11 @@ static int register_process_vi(struct device_queue_manager *dqm,
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ qpd->sh_mem_config |= 1 <<
+ SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 453c5d66e5c3..acf4d2a977ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -59,7 +59,7 @@ static inline size_t doorbell_process_allocation(void)
}
/* Doorbell calculations for device init. */
-void kfd_doorbell_init(struct kfd_dev *kfd)
+int kfd_doorbell_init(struct kfd_dev *kfd)
{
size_t doorbell_start_offset;
size_t doorbell_aperture_size;
@@ -95,26 +95,35 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
- BUG_ON(!kfd->doorbell_kernel_ptr);
+ if (!kfd->doorbell_kernel_ptr)
+ return -ENOMEM;
- pr_debug("kfd: doorbell initialization:\n");
- pr_debug("kfd: doorbell base == 0x%08lX\n",
+ pr_debug("Doorbell initialization:\n");
+ pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+ pr_debug("doorbell_id_offset == 0x%08lX\n",
kfd->doorbell_id_offset);
- pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+ pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
- pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+ pr_debug("doorbell_kernel_offset == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+ pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size);
- pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+ pr_debug("doorbell kernel address == 0x%08lX\n",
(uintptr_t)kfd->doorbell_kernel_ptr);
+
+ return 0;
+}
+
+void kfd_doorbell_fini(struct kfd_dev *kfd)
+{
+ if (kfd->doorbell_kernel_ptr)
+ iounmap(kfd->doorbell_kernel_ptr);
}
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
@@ -131,7 +140,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
/* Find kfd device according to gpu id */
dev = kfd_device_by_id(vma->vm_pgoff);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
/* Calculate physical address of doorbell */
@@ -142,12 +151,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("kfd: mapping doorbell page in %s\n"
+ pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- __func__,
(unsigned long long) vma->vm_start, address, vma->vm_flags,
doorbell_process_allocation());
@@ -166,8 +174,6 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
{
u32 inx;
- BUG_ON(!kfd || !doorbell_off);
-
mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
@@ -185,7 +191,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
sizeof(u32)) + inx;
- pr_debug("kfd: get kernel queue doorbell\n"
+ pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
@@ -197,8 +203,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- BUG_ON(!kfd || !db_addr);
-
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
mutex_lock(&kfd->doorbell_mutex);
@@ -210,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{
if (db) {
writel(value, db);
- pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+ pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d1ce83d73a87..5979158c3f7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process,
*out_page = page;
*out_slot_index = slot;
- pr_debug("allocated event signal slot in page %p, slot %d\n",
+ pr_debug("Allocated event signal slot in page %p, slot %d\n",
page, slot);
return true;
@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
struct signal_page,
event_pages)->page_index + 1;
- pr_debug("allocated new event signal page at %p, for process %p\n",
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
- pr_debug("page index is %d\n", page->page_index);
+ pr_debug("Page index is %d\n", page->page_index);
list_add(&page->event_pages, &p->signal_event_pages);
@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
page->free_slots++;
/* We don't free signal pages, they are retained by the process
- * and reused until it exits. */
+ * and reused until it exits.
+ */
}
static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
@@ -246,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = p->next_nonsignal_event_id;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
@@ -265,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
@@ -291,13 +292,13 @@ static int create_signal_event(struct file *devkfd,
struct kfd_event *ev)
{
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
- pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
+ pr_warn("Signal event wasn't created because limit was reached\n");
return -ENOMEM;
}
if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
&ev->signal_slot_index)) {
- pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
+ pr_warn("Signal event wasn't created because out of kernel memory\n");
return -ENOMEM;
}
@@ -309,11 +310,7 @@ static int create_signal_event(struct file *devkfd,
ev->event_id = make_signal_event_id(ev->signal_page,
ev->signal_slot_index);
- pr_debug("signal event number %zu created with id %d, address %p\n",
- p->signal_event_count, ev->event_id,
- ev->user_signal_address);
-
- pr_debug("signal event number %zu created with id %d, address %p\n",
+ pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
@@ -345,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p)
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
- if (ev->signal_page != NULL) {
+ if (ev->signal_page) {
release_event_notification_slot(ev->signal_page,
ev->signal_slot_index);
p->signal_event_count--;
@@ -584,7 +581,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
* search faster.
*/
struct signal_page *page;
- unsigned i;
+ unsigned int i;
list_for_each_entry(page, &p->signal_event_pages, event_pages)
for (i = 0; i < SLOTS_PER_PAGE; i++)
@@ -816,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
/* check required size is logical */
if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
get_order(vma->vm_end - vma->vm_start)) {
- pr_err("amdkfd: event page mmap requested illegal size\n");
+ pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
@@ -825,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page = lookup_signal_page_by_index(p, page_index);
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
- pr_debug("signal page could not be found for page_index %u\n",
+ pr_debug("Signal page could not be found for page_index %u\n",
page_index);
return -EINVAL;
}
@@ -836,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP;
- pr_debug("mapping signal page\n");
+ pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
pr_debug(" pfn == 0x%016lX\n", pfn);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 2b655103ba79..c59384bbbc5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process)
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_create_process_device_data(dev, process);
- if (pdd == NULL) {
+ if (!pdd) {
pr_err("Failed to create process device data\n");
return -1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 7f134aa9bfd3..70b3a99cffc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
- unsigned wanted = 0;
+ unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index d135cd002a95..681b639f5133 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -41,11 +41,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- BUG_ON(!kq || !dev);
- BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+ if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
+ return false;
- pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
- __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+ pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
+ queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -63,23 +63,23 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
KFD_MQD_TYPE_HIQ);
break;
default:
- BUG();
- break;
+ pr_err("Invalid queue type %d\n", type);
+ return false;
}
- if (kq->mqd == NULL)
+ if (!kq->mqd)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
- if (prop.doorbell_ptr == NULL) {
- pr_err("amdkfd: error init doorbell");
+ if (!prop.doorbell_ptr) {
+ pr_err("Failed to initialize doorbell");
goto err_get_kernel_doorbell;
}
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) {
- pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
+ pr_err("Failed to init pq queues size %d\n", queue_size);
goto err_pq_allocate_vidmem;
}
@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (retval == false)
+ if (!retval)
goto err_eop_allocate_vidmem;
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
@@ -139,11 +139,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
/* assign HIQ to HQD */
if (type == KFD_QUEUE_TYPE_HIQ) {
- pr_debug("assigning hiq to hqd\n");
+ pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, NULL);
+ kq->queue->queue, &kq->queue->properties,
+ NULL);
} else {
/* allocate fence for DIQ */
@@ -180,8 +181,6 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
NULL,
@@ -211,8 +210,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
uint32_t wptr, rptr;
unsigned int *queue_address;
- BUG_ON(!kq || !buffer_ptr);
-
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -252,11 +249,7 @@ static void submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
-#endif
-
- BUG_ON(!kq);
-#ifdef DEBUG
for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
if (i % 15 == 0)
@@ -272,7 +265,6 @@ static void submit_packet(struct kernel_queue *kq)
static void rollback_packet(struct kernel_queue *kq)
{
- BUG_ON(!kq);
kq->pending_wptr = *kq->queue->properties.write_ptr;
}
@@ -281,9 +273,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
{
struct kernel_queue *kq;
- BUG_ON(!dev);
-
- kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+ kq = kzalloc(sizeof(*kq), GFP_KERNEL);
if (!kq)
return NULL;
@@ -304,7 +294,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
}
if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
- pr_err("amdkfd: failed to init kernel queue\n");
+ pr_err("Failed to init kernel queue\n");
kfree(kq);
return NULL;
}
@@ -313,32 +303,37 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
void kernel_queue_uninit(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
kq->ops.uninitialize(kq);
kfree(kq);
}
+/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
int retval;
- BUG_ON(!dev);
-
- pr_err("amdkfd: starting kernel queue test\n");
+ pr_err("Starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
- BUG_ON(!kq);
+ if (unlikely(!kq)) {
+ pr_err(" Failed to initialize HIQ\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
- BUG_ON(retval != 0);
+ if (unlikely(retval != 0)) {
+ pr_err(" Failed to acquire packet buffer\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq);
- pr_err("amdkfd: ending kernel queue test\n");
+ pr_err("Ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 850a5623661f..0d73bea22c45 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
static int amdkfd_init_completed;
-int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
{
if (!amdkfd_init_completed)
return -EPROBE_DEFER;
@@ -90,7 +91,7 @@ static int __init kfd_module_init(void)
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
- pr_err("kfd: sched_policy has invalid value\n");
+ pr_err("sched_policy has invalid value\n");
return -1;
}
@@ -98,13 +99,13 @@ static int __init kfd_module_init(void)
if ((max_num_of_queues_per_device < 1) ||
(max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
- pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+ pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
err = kfd_pasid_init();
if (err < 0)
- goto err_pasid;
+ return err;
err = kfd_chardev_init();
if (err < 0)
@@ -126,7 +127,6 @@ err_topology:
kfd_chardev_exit();
err_ioctl:
kfd_pasid_exit();
-err_pasid:
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 213a71e0b6c7..1f3a6ba7eed2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -67,7 +67,8 @@ struct mqd_manager {
int (*load_mqd)(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr);
+ struct queue_properties *p,
+ struct mm_struct *mms);
int (*update_mqd)(struct mqd_manager *mm, void *mqd,
struct queue_properties *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 6acc4313363e..44ffd23348fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -44,10 +44,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -101,7 +97,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -115,8 +111,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
int retval;
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !mqd_mem_obj);
-
retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers),
mqd_mem_obj);
@@ -129,7 +123,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q);
@@ -140,27 +134,31 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, struct queue_properties *p,
+ struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
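/*
 * Editor's note, illustrative arithmetic: an AQL write pointer counts
 * 64-byte packets while the CP consumes dwords, so the unit conversion is
 * 64 B / 4 B-per-dword = 16 = 1 << 4, hence wptr_shift = 4 (and 0 for PM4,
 * which already counts dwords). The mask wraps a power-of-two ring; for a
 * hypothetical 4 KiB queue, and sketching how the kgd hqd_load callback is
 * expected to combine the two:
 *
 *	wptr_mask = 4096 / sizeof(uint32_t) - 1;	 0x3ff, 1024-dword ring
 *	hw_wptr   = (user_wptr << wptr_shift) & wptr_mask;
 */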
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
@@ -170,10 +168,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
@@ -188,21 +182,17 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- }
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -214,8 +204,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !q);
-
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
@@ -254,7 +242,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -301,10 +289,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -359,10 +343,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
@@ -400,8 +380,6 @@ struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mqd);
-
m = (struct cik_sdma_rlc_registers *)mqd;
return m;
@@ -412,12 +390,10 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index a9b9882a9a77..73cbfe186dd2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = 1;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -94,10 +94,15 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
static int __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -106,10 +111,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
{
struct vi_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
@@ -117,7 +118,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
@@ -126,10 +127,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control =
- 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
- pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
@@ -139,8 +139,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
- m->cp_hqd_eop_control |=
- ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+ * is constrained by per-SE EOP done signal count, which is 8-bit.
+ * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
+ * more than (EOP entry count - 1) so a queue size of 0x800 dwords
+ * is safe, giving a maximum field value of 0xA.
+ */
+ m->cp_hqd_eop_control |= min(0xA,
+ ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
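/*
 * Editor's note, worked example for the clamp above: the field encodes
 * log2(EOP size in dwords) - 1, so the 0xA ceiling matches the 0x800-dword
 * limit quoted in the comment. For an 8 KiB EOP buffer:
 *
 *	dwords = 0x2000 / sizeof(unsigned int);	 0x800
 *	field  = ffs(dwords) - 1 - 1;		 ffs(0x800) = 12, field = 0xA
 */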
@@ -156,12 +163,10 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -181,14 +186,13 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, type, timeout,
+ (mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
@@ -238,12 +242,10 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 7131998848d7..1d312603de9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -26,7 +26,6 @@
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
-#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
@@ -35,7 +34,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
{
unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
- BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+ WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
+ "Runlist IB overflow");
*wptr = temp;
}
@@ -43,12 +43,12 @@ static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
- header.u32all = 0;
+ header.u32All = 0;
header.opcode = opcode;
header.count = packet_size/sizeof(uint32_t) - 2;
header.type = PM4_TYPE_3;
- return header.u32all;
+ return header.u32All;
}
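/*
 * Editor's note, illustrative: in a PM4 type-3 header the count field holds
 * (number of dwords following the header) - 1, and packet_size here is in
 * bytes including the header dword itself, which is where the "- 2" above
 * comes from. For a hypothetical 24-byte (6-dword) packet:
 *
 *	count = 24 / sizeof(uint32_t) - 2;	 4 == (6 - 1) - 1
 */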
static void pm_calc_rlib_size(struct packet_manager *pm,
@@ -58,8 +58,6 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int process_count, queue_count;
unsigned int map_queue_size;
- BUG_ON(!pm || !rlib_size || !over_subscription);
-
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
@@ -67,15 +65,12 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription = false;
if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
- pr_debug("kfd: over subscribed runlist\n");
+ pr_debug("Over subscribed runlist\n");
}
- map_queue_size =
- (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
- sizeof(struct pm4_mes_map_queues) :
- sizeof(struct pm4_map_queues);
+ map_queue_size = sizeof(struct pm4_mes_map_queues);
/* calculate run list ib allocation size */
- *rlib_size = process_count * sizeof(struct pm4_map_process) +
+ *rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
queue_count * map_queue_size;
/*
@@ -83,9 +78,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* when over subscription
*/
if (*over_subscription)
- *rlib_size += sizeof(struct pm4_runlist);
+ *rlib_size += sizeof(struct pm4_mes_runlist);
- pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+ pr_debug("runlist ib size %d\n", *rlib_size);
}
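/*
 * Editor's sketch, symbolic since the packet struct sizes are ASIC headers'
 * business: pm_calc_rlib_size() above sizes the runlist IB for one
 * map-process packet per process, one map-queues packet per queue, plus a
 * chained runlist packet when the runlist is over-subscribed:
 *
 *	rlib_size = nproc  * sizeof(struct pm4_mes_map_process)
 *		  + nqueue * sizeof(struct pm4_mes_map_queues)
 *		  + (oversub ? sizeof(struct pm4_mes_runlist) : 0);
 */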
static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -96,17 +91,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
{
int retval;
- BUG_ON(!pm);
- BUG_ON(pm->allocated);
- BUG_ON(is_over_subscription == NULL);
+ if (WARN_ON(pm->allocated))
+ return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
&pm->ib_buffer_obj);
- if (retval != 0) {
- pr_err("kfd: failed to allocate runlist IB\n");
+ if (retval) {
+ pr_err("Failed to allocate runlist IB\n");
return retval;
}
@@ -121,15 +115,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
- struct pm4_runlist *packet;
+ struct pm4_mes_runlist *packet;
- BUG_ON(!pm || !buffer || !ib);
+ if (WARN_ON(!ib))
+ return -EFAULT;
- packet = (struct pm4_runlist *)buffer;
+ packet = (struct pm4_mes_runlist *)buffer;
- memset(buffer, 0, sizeof(struct pm4_runlist));
- packet->header.u32all = build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_runlist));
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+ packet->header.u32All = build_pm4_header(IT_RUN_LIST,
+ sizeof(struct pm4_mes_runlist));
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
@@ -144,20 +139,16 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
- struct pm4_map_process *packet;
+ struct pm4_mes_map_process *packet;
struct queue *cur;
uint32_t num_queues;
- BUG_ON(!pm || !buffer || !qpd);
-
- packet = (struct pm4_map_process *)buffer;
-
- pr_debug("kfd: In func %s\n", __func__);
+ packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_process));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
- packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_map_process));
+ packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
+ sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 1;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -175,27 +166,26 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+ /* TODO: scratch support */
+ packet->sh_hidden_private_base_vmid = 0;
+
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
return 0;
}
-static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
struct pm4_mes_map_queues *packet;
bool use_static = is_static;
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
+ packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_mes_map_queues));
packet->bitfields2.alloc_format =
alloc_format__mes_map_queues__one_per_pipe_vi;
packet->bitfields2.num_queues = 1;
@@ -223,10 +213,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
use_static = false; /* no static queues under SDMA */
break;
default:
- pr_err("kfd: in %s queue type %d\n", __func__,
- q->properties.type);
- BUG();
- break;
+ WARN(1, "queue type %d", q->properties.type);
+ return -EINVAL;
}
packet->bitfields3.doorbell_offset =
q->properties.doorbell_off;
@@ -246,68 +234,6 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_map_queues *packet;
- bool use_static = is_static;
-
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- packet = (struct pm4_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
-
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
- packet->bitfields2.alloc_format =
- alloc_format__mes_map_queues__one_per_pipe;
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
-
- packet->bitfields2.vidmem = (q->properties.is_interop) ?
- vidmem__mes_map_queues__uses_video_memory :
- vidmem__mes_map_queues__uses_no_video_memory;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__sdma0;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- BUG();
- break;
- }
-
- packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mes_map_queues_ordinals[0].bitfields3.is_static =
- (use_static) ? 1 : 0;
-
- packet->mes_map_queues_ordinals[0].mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
static int pm_create_runlist_ib(struct packet_manager *pm,
struct list_head *queues,
uint64_t *rl_gpu_addr,
@@ -322,19 +248,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct kernel_queue *kq;
bool is_over_subscription;
- BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
-
rl_wptr = retval = proccesses_mapped = 0;
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
&alloc_size_bytes, &is_over_subscription);
- if (retval != 0)
+ if (retval)
return retval;
*rl_size_bytes = alloc_size_bytes;
- pr_debug("kfd: In func %s\n", __func__);
- pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+ pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count);
/* build the run list ib packet */
@@ -342,42 +265,35 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
qpd = cur->qpd;
/* build map process packet */
if (proccesses_mapped >= pm->dqm->processes_count) {
- pr_debug("kfd: not enough space left in runlist IB\n");
+ pr_debug("Not enough space left in runlist IB\n");
pm_release_ib(pm);
return -ENOMEM;
}
retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
- if (retval != 0)
+ if (retval)
return retval;
proccesses_mapped++;
- inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+ inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
alloc_size_bytes);
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
if (!kq->queue->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+ pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- kq->queue,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
kq->queue,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
@@ -385,51 +301,44 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (!q->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+ pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- q,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
q,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
}
- pr_debug("kfd: finished map process and queues to runlist\n");
+ pr_debug("Finished map process and queues to runlist\n");
if (is_over_subscription)
- pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
- alloc_size_bytes / sizeof(uint32_t), true);
+ retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
+ *rl_gpu_addr,
+ alloc_size_bytes / sizeof(uint32_t),
+ true);
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
pr_debug("\n");
- return 0;
+ return retval;
}
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
pm->dqm = dqm;
mutex_init(&pm->lock);
pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
- if (pm->priv_queue == NULL) {
+ if (!pm->priv_queue) {
mutex_destroy(&pm->lock);
return -ENOMEM;
}
@@ -440,8 +349,6 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
void pm_uninit(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_destroy(&pm->lock);
kernel_queue_uninit(pm->priv_queue);
}
@@ -449,25 +356,22 @@ void pm_uninit(struct packet_manager *pm)
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res)
{
- struct pm4_set_resources *packet;
-
- BUG_ON(!pm || !res);
-
- pr_debug("kfd: In func %s\n", __func__);
+ struct pm4_mes_set_resources *packet;
+ int retval = 0;
mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
sizeof(*packet) / sizeof(uint32_t),
- (unsigned int **)&packet);
- if (packet == NULL) {
- mutex_unlock(&pm->lock);
- pr_err("kfd: failed to allocate buffer on kernel queue\n");
- return -ENOMEM;
+ (unsigned int **)&packet);
+ if (!packet) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ retval = -ENOMEM;
+ goto out;
}
- memset(packet, 0, sizeof(struct pm4_set_resources));
- packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
- sizeof(struct pm4_set_resources));
+ memset(packet, 0, sizeof(struct pm4_mes_set_resources));
+ packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_mes_set_resources));
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
@@ -485,9 +389,10 @@ int pm_send_set_resources(struct packet_manager *pm,
pm->priv_queue->ops.submit_packet(pm->priv_queue);
+out:
mutex_unlock(&pm->lock);
- return 0;
+ return retval;
}
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
@@ -497,26 +402,24 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
size_t rl_ib_size, packet_size_dwords;
int retval;
- BUG_ON(!pm || !dqm_queues);
-
retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
&rl_ib_size);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist_ib;
- pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
- packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+ packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
rl_ib_size / sizeof(uint32_t), false);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist;
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -530,8 +433,7 @@ fail_create_runlist:
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
- if (pm->allocated)
- pm_release_ib(pm);
+ pm_release_ib(pm);
return retval;
}
@@ -539,20 +441,21 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value)
{
int retval;
- struct pm4_query_status *packet;
+ struct pm4_mes_query_status *packet;
- BUG_ON(!pm || !fence_address);
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_query_status) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
(unsigned int **)&packet);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
- packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_query_status));
+ packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
packet->bitfields2.context_id = 0;
packet->bitfields2.interrupt_sel =
@@ -566,9 +469,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
packet->data_lo = lower_32_bits((uint64_t)fence_value);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
- mutex_unlock(&pm->lock);
-
- return 0;
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
@@ -582,24 +482,22 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
{
int retval;
uint32_t *buffer;
- struct pm4_unmap_queues *packet;
-
- BUG_ON(!pm);
+ struct pm4_mes_unmap_queues *packet;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
&buffer);
- if (retval != 0)
+ if (retval)
goto err_acquire_packet_buffer;
- packet = (struct pm4_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_unmap_queues));
- pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+ packet = (struct pm4_mes_unmap_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+ pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
mode, reset, type);
- packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_unmap_queues));
+ packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
+ sizeof(struct pm4_mes_unmap_queues));
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE:
case KFD_QUEUE_TYPE_DIQ:
@@ -611,8 +509,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
default:
- BUG();
- break;
+ WARN(1, "queue type %d", type);
+ retval = -EINVAL;
+ goto err_invalid;
}
if (reset)
@@ -636,16 +535,17 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
break;
case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+ queue_sel__mes_unmap_queues__unmap_all_queues;
break;
case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
+ queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
- BUG();
- break;
+ WARN(1, "filter %d", mode);
+ retval = -EINVAL;
+ goto err_invalid;
}
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -653,6 +553,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
mutex_unlock(&pm->lock);
return 0;
+err_invalid:
+ pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
mutex_unlock(&pm->lock);
return retval;
@@ -660,8 +562,6 @@ err_acquire_packet_buffer:
void pm_release_ib(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_lock(&pm->lock);
if (pm->allocated) {
kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
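A pattern worth noting in the hunks above: every BUG()/BUG_ON() in the packet manager is replaced by WARN()/WARN_ON() plus a real error return, and a partially built packet is unwound through rollback_packet() instead of crashing the kernel. A minimal user-space sketch of that shape (the names and the WARN macro here are stand-ins, not the driver's API):

	#include <stdio.h>
	#include <errno.h>

	/* Stand-in for the kernel's WARN(): report, but keep running. */
	#define WARN(cond, fmt, ...) \
		((cond) ? (fprintf(stderr, "WARN: " fmt "\n", ##__VA_ARGS__), 1) : 0)

	enum queue_type { QT_COMPUTE, QT_DIQ, QT_SDMA, QT_INVALID };

	/* Encode engine_sel the way pm_create_map_queue_vi() now does:
	 * warn on an unknown type and return -EINVAL instead of BUG(). */
	int encode_engine_sel(enum queue_type type, int *engine_sel)
	{
		switch (type) {
		case QT_COMPUTE:
		case QT_DIQ:
			*engine_sel = 0;	/* compute engine */
			return 0;
		case QT_SDMA:
			*engine_sel = 2;	/* sdma0 */
			return 0;
		default:
			WARN(1, "queue type %d", type);
			return -EINVAL;
		}
	}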
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 6cfe7f1f18cf..1e06de0bc673 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
{
pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+ GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
@@ -91,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid)
{
- BUG_ON(pasid == 0 || pasid >= pasid_limit);
- clear_bit(pasid, pasid_bitmap);
+ if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
+ clear_bit(pasid, pasid_bitmap);
}
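kfd_pasid_free() now tolerates an out-of-range PASID: the bit is cleared only when the WARN_ON() range check passes. The same guard in plain C, assuming a flat user-space bitmap instead of the kernel's clear_bit():

	#include <limits.h>
	#include <stdio.h>

	#define PASID_LIMIT 512
	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	static unsigned long pasid_bitmap[PASID_LIMIT / BITS_PER_LONG];

	void pasid_free(unsigned int pasid)
	{
		/* Mirror of the WARN_ON() guard: refuse bad ids, don't crash. */
		if (pasid == 0 || pasid >= PASID_LIMIT) {
			fprintf(stderr, "WARN: bad pasid %u\n", pasid);
			return;
		}
		pasid_bitmap[pasid / BITS_PER_LONG] &=
				~(1UL << (pasid % BITS_PER_LONG));
	}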
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
index 5b393f3e34a9..e50f73d25de6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -28,112 +28,19 @@
#define PM4_MES_HEADER_DEFINED
union PM4_MES_TYPE_3_HEADER {
struct {
- uint32_t reserved1:8; /* < reserved */
- uint32_t opcode:8; /* < IT opcode */
- uint32_t count:14; /* < number of DWORDs - 1
- * in the information body.
- */
- uint32_t type:2; /* < packet identifier.
- * It should be 3 for type 3 packets
- */
+ /* reserved */
+ uint32_t reserved1:8;
+ /* IT opcode */
+ uint32_t opcode:8;
+ /* number of DWORDs - 1 in the information body */
+ uint32_t count:14;
+ /* packet identifier. It should be 3 for type 3 packets */
+ uint32_t type:2;
};
uint32_t u32all;
};
#endif /* PM4_MES_HEADER_DEFINED */
-/* --------------------MES_SET_RESOURCES-------------------- */
-
-#ifndef PM4_MES_SET_RESOURCES_DEFINED
-#define PM4_MES_SET_RESOURCES_DEFINED
-enum set_resources_queue_type_enum {
- queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
- queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
- queue_type__mes_set_resources__hsa_debug_interface_queue = 4
-};
-
-struct pm4_set_resources {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t vmid_mask:16;
- uint32_t unmap_latency:8;
- uint32_t reserved1:5;
- enum set_resources_queue_type_enum queue_type:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- uint32_t queue_mask_lo;
- uint32_t queue_mask_hi;
- uint32_t gws_mask_lo;
- uint32_t gws_mask_hi;
-
- union {
- struct {
- uint32_t oac_mask:16;
- uint32_t reserved2:16;
- } bitfields7;
- uint32_t ordinal7;
- };
-
- union {
- struct {
- uint32_t gds_heap_base:6;
- uint32_t reserved3:5;
- uint32_t gds_heap_size:6;
- uint32_t reserved4:15;
- } bitfields8;
- uint32_t ordinal8;
- };
-
-};
-#endif
-
-/*--------------------MES_RUN_LIST-------------------- */
-
-#ifndef PM4_MES_RUN_LIST_DEFINED
-#define PM4_MES_RUN_LIST_DEFINED
-
-struct pm4_runlist {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:2;
- uint32_t ib_base_lo:30;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- union {
- struct {
- uint32_t ib_base_hi:16;
- uint32_t reserved2:16;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- union {
- struct {
- uint32_t ib_size:20;
- uint32_t chain:1;
- uint32_t offload_polling:1;
- uint32_t reserved3:1;
- uint32_t valid:1;
- uint32_t reserved4:8;
- } bitfields4;
- uint32_t ordinal4;
- };
-
-};
-#endif
/*--------------------MES_MAP_PROCESS-------------------- */
@@ -186,217 +93,58 @@ struct pm4_map_process {
};
#endif
-/*--------------------MES_MAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_MAP_QUEUES_DEFINED
-#define PM4_MES_MAP_QUEUES_DEFINED
-enum map_queues_queue_sel_enum {
- queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
- queue_sel__mes_map_queues__enable_process_queues = 2
-};
+#ifndef PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
+#define PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
-enum map_queues_vidmem_enum {
- vidmem__mes_map_queues__uses_no_video_memory = 0,
- vidmem__mes_map_queues__uses_video_memory = 1
-};
-
-enum map_queues_alloc_format_enum {
- alloc_format__mes_map_queues__one_per_pipe = 0,
- alloc_format__mes_map_queues__all_on_one_pipe = 1
-};
-
-enum map_queues_engine_sel_enum {
- engine_sel__mes_map_queues__compute = 0,
- engine_sel__mes_map_queues__sdma0 = 2,
- engine_sel__mes_map_queues__sdma1 = 3
-};
-
-struct pm4_map_queues {
+struct pm4_map_process_scratch_kv {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:4;
- enum map_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:2;
- uint32_t vmid:4;
- uint32_t reserved3:4;
- enum map_queues_vidmem_enum vidmem:2;
- uint32_t reserved4:6;
- enum map_queues_alloc_format_enum alloc_format:2;
- enum map_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- struct {
- union {
- struct {
- uint32_t is_static:1;
- uint32_t reserved5:1;
- uint32_t doorbell_offset:21;
- uint32_t reserved6:3;
- uint32_t queue:6;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- uint32_t mqd_addr_lo;
- uint32_t mqd_addr_hi;
- uint32_t wptr_addr_lo;
- uint32_t wptr_addr_hi;
-
- } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
-
-};
-#endif
-
-/*--------------------MES_QUERY_STATUS--------------------*/
-
-#ifndef PM4_MES_QUERY_STATUS_DEFINED
-#define PM4_MES_QUERY_STATUS_DEFINED
-enum query_status_interrupt_sel_enum {
- interrupt_sel__mes_query_status__completion_status = 0,
- interrupt_sel__mes_query_status__process_status = 1,
- interrupt_sel__mes_query_status__queue_status = 2
-};
-
-enum query_status_command_enum {
- command__mes_query_status__interrupt_only = 0,
- command__mes_query_status__fence_only_immediate = 1,
- command__mes_query_status__fence_only_after_write_ack = 2,
- command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
-};
-
-enum query_status_engine_sel_enum {
- engine_sel__mes_query_status__compute = 0,
- engine_sel__mes_query_status__sdma0_queue = 2,
- engine_sel__mes_query_status__sdma1_queue = 3
-};
-
-struct pm4_query_status {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t context_id:28;
- enum query_status_interrupt_sel_enum interrupt_sel:2;
- enum query_status_command_enum command:2;
- } bitfields2;
- uint32_t ordinal2;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16;
- uint32_t reserved1:16;
- } bitfields3a;
- struct {
- uint32_t reserved2:2;
- uint32_t doorbell_offset:21;
- uint32_t reserved3:3;
- enum query_status_engine_sel_enum engine_sel:3;
- uint32_t reserved4:3;
- } bitfields3b;
- uint32_t ordinal3;
- };
-
- uint32_t addr_lo;
- uint32_t addr_hi;
- uint32_t data_lo;
- uint32_t data_hi;
-};
-#endif
-
-/*--------------------MES_UNMAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
-#define PM4_MES_UNMAP_QUEUES_DEFINED
-enum unmap_queues_action_enum {
- action__mes_unmap_queues__preempt_queues = 0,
- action__mes_unmap_queues__reset_queues = 1,
- action__mes_unmap_queues__disable_process_queues = 2
-};
-
-enum unmap_queues_queue_sel_enum {
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2,
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only = 3
-};
-
-enum unmap_queues_engine_sel_enum {
- engine_sel__mes_unmap_queues__compute = 0,
- engine_sel__mes_unmap_queues__sdma0 = 2,
- engine_sel__mes_unmap_queues__sdma1 = 3
-};
-
-struct pm4_unmap_queues {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- enum unmap_queues_action_enum action:2;
- uint32_t reserved1:2;
- enum unmap_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:20;
- enum unmap_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
- uint32_t pasid:16;
- uint32_t reserved3:16;
- } bitfields3a;
- struct {
- uint32_t reserved4:2;
- uint32_t doorbell_offset0:21;
- uint32_t reserved5:9;
- } bitfields3b;
+ uint32_t page_table_base:28;
+ uint32_t reserved2:4;
+ } bitfields3;
uint32_t ordinal3;
};
- union {
- struct {
- uint32_t reserved6:2;
- uint32_t doorbell_offset1:21;
- uint32_t reserved7:9;
- } bitfields4;
- uint32_t ordinal4;
- };
-
- union {
- struct {
- uint32_t reserved8:2;
- uint32_t doorbell_offset2:21;
- uint32_t reserved9:9;
- } bitfields5;
- uint32_t ordinal5;
- };
+ uint32_t reserved3;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_hidden_private_base_vmid;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
union {
struct {
- uint32_t reserved10:2;
- uint32_t doorbell_offset3:21;
- uint32_t reserved11:9;
- } bitfields6;
- uint32_t ordinal6;
+ uint32_t num_gws:6;
+ uint32_t reserved6:2;
+ uint32_t num_oac:4;
+ uint32_t reserved7:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields14;
+ uint32_t ordinal14;
};
+ uint32_t completion_signal_lo32;
+ uint32_t completion_signal_hi32;
};
#endif
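All of these packets start with the PM4_MES_TYPE_3_HEADER union kept above: the bitfields are filled in and the packed dword is read back through u32all. A hedged sketch of how such a header can be composed; the driver's build_pm4_header() is not part of this diff, and the count convention (DWORDs - 1 in the information body, per the comment above) is the only assumption:

	#include <stdint.h>

	union pm4_type3_header {	/* same layout as PM4_MES_TYPE_3_HEADER */
		struct {
			uint32_t reserved1:8;
			uint32_t opcode:8;
			uint32_t count:14;	/* DWORDs - 1 in the body */
			uint32_t type:2;	/* 3 for type-3 packets */
		};
		uint32_t u32all;
	};

	uint32_t make_pm4_header(uint8_t opcode, unsigned int packet_size_bytes)
	{
		union pm4_type3_header h = { .u32all = 0 };

		h.opcode = opcode;
		/* total DWORDs minus the header dword, minus one more per spec */
		h.count = packet_size_bytes / sizeof(uint32_t) - 2;
		h.type = 3;
		return h.u32all;
	}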
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
index 08c721922812..7c8d9b357749 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
struct {
uint32_t reserved1 : 8; /* < reserved */
uint32_t opcode : 8; /* < IT opcode */
- uint32_t count : 14;/* < number of DWORDs - 1 in the
- information body. */
- uint32_t type : 2; /* < packet identifier.
- It should be 3 for type 3 packets */
+ uint32_t count : 14;/* < Number of DWORDS - 1 in the
+ * information body
+ */
+ uint32_t type : 2; /* < packet identifier
+ * It should be 3 for type 3 packets
+ */
};
uint32_t u32All;
};
@@ -124,9 +126,10 @@ struct pm4_mes_runlist {
uint32_t ib_size:20;
uint32_t chain:1;
uint32_t offload_polling:1;
- uint32_t reserved3:1;
+ uint32_t reserved2:1;
uint32_t valid:1;
- uint32_t reserved4:8;
+ uint32_t process_cnt:4;
+ uint32_t reserved3:4;
} bitfields4;
uint32_t ordinal4;
};
@@ -141,8 +144,8 @@ struct pm4_mes_runlist {
struct pm4_mes_map_process {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
@@ -153,36 +156,48 @@ struct pm4_mes_map_process {
uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
-};
+ };
union {
struct {
uint32_t page_table_base:28;
- uint32_t reserved2:4;
+ uint32_t reserved3:4;
} bitfields3;
uint32_t ordinal3;
};
+ uint32_t reserved;
+
uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
- uint32_t sh_mem_config;
+
+ uint32_t sh_hidden_private_base_vmid;
+
+ uint32_t reserved2;
+ uint32_t reserved3;
+
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:6;
- uint32_t reserved3:2;
+ uint32_t reserved4:2;
uint32_t num_oac:4;
- uint32_t reserved4:4;
+ uint32_t reserved5:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields10;
uint32_t ordinal10;
};
+ uint32_t completion_signal_lo;
+ uint32_t completion_signal_hi;
+
};
+
#endif
/*--------------------MES_MAP_QUEUES--------------------*/
@@ -335,7 +350,7 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3
};
-struct PM4_MES_UNMAP_QUEUES {
+struct pm4_mes_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
@@ -395,4 +410,101 @@ struct PM4_MES_UNMAP_QUEUES {
};
#endif
+#ifndef PM4_MEC_RELEASE_MEM_DEFINED
+#define PM4_MEC_RELEASE_MEM_DEFINED
+enum RELEASE_MEM_event_index_enum {
+ event_index___release_mem__end_of_pipe = 5,
+ event_index___release_mem__shader_done = 6
+};
+
+enum RELEASE_MEM_cache_policy_enum {
+ cache_policy___release_mem__lru = 0,
+ cache_policy___release_mem__stream = 1,
+ cache_policy___release_mem__bypass = 2
+};
+
+enum RELEASE_MEM_dst_sel_enum {
+ dst_sel___release_mem__memory_controller = 0,
+ dst_sel___release_mem__tc_l2 = 1,
+ dst_sel___release_mem__queue_write_pointer_register = 2,
+ dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
+};
+
+enum RELEASE_MEM_int_sel_enum {
+ int_sel___release_mem__none = 0,
+ int_sel___release_mem__send_interrupt_only = 1,
+ int_sel___release_mem__send_interrupt_after_write_confirm = 2,
+ int_sel___release_mem__send_data_after_write_confirm = 3
+};
+
+enum RELEASE_MEM_data_sel_enum {
+ data_sel___release_mem__none = 0,
+ data_sel___release_mem__send_32_bit_low = 1,
+ data_sel___release_mem__send_64_bit_data = 2,
+ data_sel___release_mem__send_gpu_clock_counter = 3,
+ data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
+ data_sel___release_mem__store_gds_data_to_memory = 5
+};
+
+struct pm4_mec_release_mem {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int event_type:6;
+ unsigned int reserved1:2;
+ enum RELEASE_MEM_event_index_enum event_index:4;
+ unsigned int tcl1_vol_action_ena:1;
+ unsigned int tc_vol_action_ena:1;
+ unsigned int reserved2:1;
+ unsigned int tc_wb_action_ena:1;
+ unsigned int tcl1_action_ena:1;
+ unsigned int tc_action_ena:1;
+ unsigned int reserved3:6;
+ unsigned int atc:1;
+ enum RELEASE_MEM_cache_policy_enum cache_policy:2;
+ unsigned int reserved4:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int reserved5:16;
+ enum RELEASE_MEM_dst_sel_enum dst_sel:2;
+ unsigned int reserved6:6;
+ enum RELEASE_MEM_int_sel_enum int_sel:3;
+ unsigned int reserved7:2;
+ enum RELEASE_MEM_data_sel_enum data_sel:3;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ union {
+ struct {
+ unsigned int reserved8:2;
+ unsigned int address_lo_32b:30;
+ } bitfields4;
+ struct {
+ unsigned int reserved9:3;
+ unsigned int address_lo_64b:29;
+ } bitfields5;
+ unsigned int ordinal4;
+ };
+
+ unsigned int address_hi;
+
+ unsigned int data_lo;
+
+ unsigned int data_hi;
+};
+#endif
+
+enum {
+ CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
#endif
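The new pm4_mec_release_mem packet is what the CP uses to signal end-of-pipe fences. A hedged sketch of filling one for a 32-bit fence write plus interrupt, reusing only the definitions added above (the driver's own fill routine is not part of this diff, and the caller is assumed to have zeroed the packet first):

	#include <stdint.h>

	/* Assumes the pm4_mec_release_mem definitions from the hunk above. */
	void fill_release_mem(struct pm4_mec_release_mem *pkt,
			      uint64_t fence_gpu_addr, uint32_t fence_value)
	{
		pkt->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
		pkt->bitfields2.event_index = event_index___release_mem__end_of_pipe;
		pkt->bitfields3.int_sel =
			int_sel___release_mem__send_interrupt_after_write_confirm;
		pkt->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
		/* the low address field is in dwords, hence the >> 2 */
		pkt->bitfields4.address_lo_32b = (uint32_t)fence_gpu_addr >> 2;
		pkt->address_hi = (uint32_t)(fence_gpu_addr >> 32);
		pkt->data_lo = fence_value;
	}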
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4750cabe4252..b397ec726400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -239,11 +239,6 @@ enum kfd_preempt_type_filter {
KFD_PREEMPT_TYPE_FILTER_BY_PASID
};
-enum kfd_preempt_type {
- KFD_PREEMPT_TYPE_WAVEFRONT,
- KFD_PREEMPT_TYPE_WAVEFRONT_RESET
-};
-
/**
* enum kfd_queue_type
*
@@ -294,13 +289,13 @@ enum kfd_queue_format {
* @write_ptr: Defines the number of dwords written to the ring buffer.
*
* @doorbell_ptr: This field aim is to notify the H/W of new packet written to
- * the queue ring buffer. This field should be similar to write_ptr and the user
- * should update this field after he updated the write_ptr.
+ * the queue ring buffer. This field should be similar to write_ptr and the
+ * user should update this field after updating the write_ptr.
*
* @doorbell_off: The doorbell offset in the doorbell pci-bar.
*
- * @is_interop: Defines if this is a interop queue. Interop queue means that the
- * queue can access both graphics and compute resources.
+ * @is_interop: Defines if this is an interop queue. An interop queue means that
+ * the queue can access both graphics and compute resources.
*
* @is_active: Defines if the queue is active or not.
*
@@ -352,9 +347,10 @@ struct queue_properties {
* @properties: The queue properties.
*
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
- * that the queue should be execute on.
+ * that the queue should be executed on.
*
- * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
+ * id.
*
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
*
@@ -436,6 +432,7 @@ struct qcm_process_device {
uint32_t gds_size;
uint32_t num_gws;
uint32_t num_oac;
+ uint32_t sh_hidden_private_base;
};
/* Data that is per-process-per device. */
@@ -520,8 +517,8 @@ struct kfd_process {
struct mutex event_mutex;
/* All events in process hashed by ID, linked on kfd_event.events. */
DECLARE_HASHTABLE(events, 4);
- struct list_head signal_event_pages; /* struct slot_page_header.
- event_pages */
+ /* struct slot_page_header.event_pages */
+ struct list_head signal_event_pages;
u32 next_nonsignal_event_id;
size_t signal_event_count;
};
@@ -559,8 +556,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);
@@ -573,7 +572,8 @@ unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
-void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_init(struct kfd_dev *kfd);
+void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 035bbc98a63d..c74cf22a1ed9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,9 +79,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
struct kfd_process *process;
- BUG_ON(!kfd_process_wq);
-
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -101,7 +99,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread);
if (process)
- pr_debug("kfd: process already found\n");
+ pr_debug("Process already found\n");
if (!process)
process = create_process(thread);
@@ -117,7 +115,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
struct kfd_process *process;
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process_release_work *work;
struct kfd_process *p;
- BUG_ON(!kfd_process_wq);
-
p = container_of(rcu, struct kfd_process, rcu);
- BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+ WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* mmu_notifier srcu is read locked
*/
p = container_of(mn, struct kfd_process, mmu_notifier);
- BUG_ON(p->mm != mm);
+ if (WARN_ON(p->mm != mm))
+ return;
mutex_lock(&kfd_processes_mutex);
hash_del_rcu(&p->kfd_processes);
@@ -250,7 +247,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
if (pdd->reset_wavefronts) {
- pr_warn("amdkfd: Resetting all wave fronts\n");
+ pr_warn("Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
@@ -407,8 +404,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
struct kfd_process *p;
struct kfd_process_device *pdd;
- BUG_ON(dev == NULL);
-
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
@@ -449,14 +444,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
mutex_unlock(&p->mutex);
}
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p)
{
return list_first_entry(&p->per_device_data,
struct kfd_process_device,
per_device_list);
}
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd)
{
if (list_is_last(&pdd->per_device_list, &p->per_device_data))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 32cdf2b483db..1cae95e2b13a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -32,12 +32,9 @@ static inline struct process_queue_node *get_queue_by_qid(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->properties.queue_id == qid)
- return pqn;
- if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+ if ((pqn->q && pqn->q->properties.queue_id == qid) ||
+ (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
return pqn;
}
@@ -49,17 +46,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
{
unsigned long found;
- BUG_ON(!pqm || !qid);
-
- pr_debug("kfd: in %s\n", __func__);
-
found = find_first_zero_bit(pqm->queue_slot_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
- pr_debug("kfd: the new slot id %lu\n", found);
+ pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -72,13 +65,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
- BUG_ON(!pqm);
-
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
- if (pqm->queue_slot_bitmap == NULL)
+ if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
@@ -90,10 +81,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
int retval;
struct process_queue_node *pqn, *next;
- BUG_ON(!pqm);
-
- pr_debug("In func %s\n", __func__);
-
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
retval = pqm_destroy_queue(
pqm,
@@ -102,7 +89,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqn->kq->queue->properties.queue_id);
if (retval != 0) {
- pr_err("kfd: failed to destroy queue\n");
+ pr_err("failed to destroy queue\n");
return;
}
}
@@ -117,8 +104,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
{
int retval;
- retval = 0;
-
/* Doorbell initialized in user space*/
q_properties->doorbell_ptr = NULL;
@@ -131,16 +116,13 @@ static int create_cp_queue(struct process_queue_manager *pqm,
retval = init_queue(q, q_properties);
if (retval != 0)
- goto err_init_queue;
+ return retval;
(*q)->device = dev;
(*q)->process = pqm->process;
- pr_debug("kfd: PQM After init queue");
-
- return retval;
+ pr_debug("PQM After init queue");
-err_init_queue:
return retval;
}
@@ -161,8 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0;
struct queue *cur;
- BUG_ON(!pqm || !dev || !properties || !qid);
-
memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
@@ -185,7 +165,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_for_each_entry(cur, &pdd->qpd.queues_list, list)
num_queues++;
if (num_queues >= dev->device_info->max_no_of_hqd/2)
- return (-ENOSPC);
+ return -ENOSPC;
}
retval = find_available_queue_slot(pqm, qid);
@@ -197,7 +177,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
- pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+ pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
goto err_allocate_pqn;
@@ -210,7 +190,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -227,7 +207,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
break;
case KFD_QUEUE_TYPE_DIQ:
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
- if (kq == NULL) {
+ if (!kq) {
retval = -ENOMEM;
goto err_create_queue;
}
@@ -238,22 +218,22 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq, &pdd->qpd);
break;
default:
- BUG();
- break;
+ WARN(1, "Invalid queue type %d", type);
+ retval = -EINVAL;
}
if (retval != 0) {
- pr_debug("Error dqm create queue\n");
+ pr_err("DQM create queue failed\n");
goto err_create_queue;
}
- pr_debug("kfd: PQM After DQM create queue\n");
+ pr_debug("PQM After DQM create queue\n");
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
*properties = q->properties;
- pr_debug("kfd: PQM done creating queue\n");
+ pr_debug("PQM done creating queue\n");
print_queue_properties(properties);
}
@@ -279,14 +259,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = NULL;
- BUG_ON(!pqm);
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
pqn = get_queue_by_qid(pqm, qid);
- if (pqn == NULL) {
- pr_err("kfd: queue id does not match any known queue\n");
+ if (!pqn) {
+ pr_err("Queue id does not match any known queue\n");
return -EINVAL;
}
@@ -295,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->kq->dev;
if (pqn->q)
dev = pqn->q->device;
- BUG_ON(!dev);
+ if (WARN_ON(!dev))
+ return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
@@ -335,12 +313,9 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
int retval;
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
- pr_debug("amdkfd: No queue %d exists for update operation\n",
- qid);
+ pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
@@ -363,8 +338,6 @@ struct kernel_queue *pqm_get_kernel_queue(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (pqn && pqn->kq)
return pqn->kq;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 0ab197077f2d..a5315d4f1c95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -65,17 +65,15 @@ void print_queue(struct queue *q)
int init_queue(struct queue **q, const struct queue_properties *properties)
{
- struct queue *tmp;
+ struct queue *tmp_q;
- BUG_ON(!q);
-
- tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
- if (!tmp)
+ tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
+ if (!tmp_q)
return -ENOMEM;
- memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
+ memcpy(&tmp_q->properties, properties, sizeof(*properties));
- *q = tmp;
+ *q = tmp_q;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 1e5064749959..19ce59028d6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -108,9 +108,6 @@ static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.cpu_cores_count = cu->num_cpu_cores;
dev->node_props.cpu_core_id_base = cu->processor_id_low;
if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
@@ -123,9 +120,6 @@ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.simd_id_base = cu->processor_id_low;
dev->node_props.simd_count = cu->num_simd_cores;
dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
@@ -148,8 +142,6 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!cu);
-
pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
cu->proximity_domain, cu->hsa_capability);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -177,8 +169,6 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!mem);
-
pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->promixity_domain);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -223,8 +213,6 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
struct kfd_topology_device *dev;
uint32_t id;
- BUG_ON(!cache);
-
id = cache->processor_id_low;
pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
@@ -274,8 +262,6 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
uint32_t id_from;
uint32_t id_to;
- BUG_ON(!iolink);
-
id_from = iolink->proximity_domain_from;
id_to = iolink->proximity_domain_to;
@@ -323,8 +309,6 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
struct crat_subtype_iolink *iolink;
int ret = 0;
- BUG_ON(!sub_type_hdr);
-
switch (sub_type_hdr->type) {
case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
cu = (struct crat_subtype_computeunit *)sub_type_hdr;
@@ -368,8 +352,6 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
- BUG_ON(!dev);
-
list_del(&dev->list);
while (dev->mem_props.next != &dev->mem_props) {
@@ -416,7 +398,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
struct kfd_topology_device *dev;
dev = kfd_alloc_struct(dev);
- if (dev == NULL) {
+ if (!dev) {
pr_err("No memory to allocate a topology device");
return NULL;
}
@@ -666,7 +648,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
@@ -763,8 +745,6 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
- BUG_ON(!dev);
-
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
if (iolink->kobj) {
@@ -819,12 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
int ret;
uint32_t i;
- BUG_ON(!dev);
+ if (WARN_ON(dev->kobj_node))
+ return -EEXIST;
/*
* Creating the sysfs folders
*/
- BUG_ON(dev->kobj_node);
dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
if (!dev->kobj_node)
return -ENOMEM;
@@ -957,7 +937,7 @@ static int kfd_topology_update_sysfs(void)
int ret;
pr_info("Creating topology SYSFS entries\n");
- if (sys_props.kobj_topology == NULL) {
+ if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
if (!sys_props.kobj_topology)
@@ -1117,10 +1097,8 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
- BUG_ON(!gpu);
-
list_for_each_entry(dev, &topology_device_list, list)
- if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+ if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
@@ -1143,11 +1121,9 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
int res;
- BUG_ON(!gpu);
-
gpu_id = kfd_generate_gpu_id(gpu);
- pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
down_write(&topology_lock);
/*
@@ -1170,8 +1146,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* GPU vBIOS
*/
- /*
- * Update the SYSFS tree, since we added another topology device
+ /* Update the SYSFS tree, since we added another topology
+ * device
*/
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
@@ -1190,7 +1166,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
- pr_info("amdkfd: adding doorbell packet type capability\n");
+ pr_info("Adding doorbell packet type capability\n");
}
res = 0;
@@ -1210,8 +1186,6 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
uint32_t gpu_id;
int res = -ENODEV;
- BUG_ON(!gpu);
-
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0021a1c63356..837296db9628 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1233,6 +1233,69 @@ struct atom_asic_profiling_info_v4_1
uint32_t phyclk2gfxclk_c;
};
+struct atom_asic_profiling_info_v4_2 {
+ struct atom_common_table_header table_header;
+ uint32_t maxvddc;
+ uint32_t minvddc;
+ uint32_t avfs_meannsigma_acontant0;
+ uint32_t avfs_meannsigma_acontant1;
+ uint32_t avfs_meannsigma_acontant2;
+ uint16_t avfs_meannsigma_dc_tol_sigma;
+ uint16_t avfs_meannsigma_platform_mean;
+ uint16_t avfs_meannsigma_platform_sigma;
+ uint32_t gb_vdroop_table_cksoff_a0;
+ uint32_t gb_vdroop_table_cksoff_a1;
+ uint32_t gb_vdroop_table_cksoff_a2;
+ uint32_t gb_vdroop_table_ckson_a0;
+ uint32_t gb_vdroop_table_ckson_a1;
+ uint32_t gb_vdroop_table_ckson_a2;
+ uint32_t avfsgb_fuse_table_cksoff_m1;
+ uint32_t avfsgb_fuse_table_cksoff_m2;
+ uint32_t avfsgb_fuse_table_cksoff_b;
+ uint32_t avfsgb_fuse_table_ckson_m1;
+ uint32_t avfsgb_fuse_table_ckson_m2;
+ uint32_t avfsgb_fuse_table_ckson_b;
+ uint16_t max_voltage_0_25mv;
+ uint8_t enable_gb_vdroop_table_cksoff;
+ uint8_t enable_gb_vdroop_table_ckson;
+ uint8_t enable_gb_fuse_table_cksoff;
+ uint8_t enable_gb_fuse_table_ckson;
+ uint16_t psm_age_comfactor;
+ uint8_t enable_apply_avfs_cksoff_voltage;
+ uint8_t reserved;
+ uint32_t dispclk2gfxclk_a;
+ uint32_t dispclk2gfxclk_b;
+ uint32_t dispclk2gfxclk_c;
+ uint32_t pixclk2gfxclk_a;
+ uint32_t pixclk2gfxclk_b;
+ uint32_t pixclk2gfxclk_c;
+ uint32_t dcefclk2gfxclk_a;
+ uint32_t dcefclk2gfxclk_b;
+ uint32_t dcefclk2gfxclk_c;
+ uint32_t phyclk2gfxclk_a;
+ uint32_t phyclk2gfxclk_b;
+ uint32_t phyclk2gfxclk_c;
+ uint32_t acg_gb_vdroop_table_a0;
+ uint32_t acg_gb_vdroop_table_a1;
+ uint32_t acg_gb_vdroop_table_a2;
+ uint32_t acg_avfsgb_fuse_table_m1;
+ uint32_t acg_avfsgb_fuse_table_m2;
+ uint32_t acg_avfsgb_fuse_table_b;
+ uint8_t enable_acg_gb_vdroop_table;
+ uint8_t enable_acg_gb_fuse_table;
+ uint32_t acg_dispclk2gfxclk_a;
+ uint32_t acg_dispclk2gfxclk_b;
+ uint32_t acg_dispclk2gfxclk_c;
+ uint32_t acg_pixclk2gfxclk_a;
+ uint32_t acg_pixclk2gfxclk_b;
+ uint32_t acg_pixclk2gfxclk_c;
+ uint32_t acg_dcefclk2gfxclk_a;
+ uint32_t acg_dcefclk2gfxclk_b;
+ uint32_t acg_dcefclk2gfxclk_c;
+ uint32_t acg_phyclk2gfxclk_a;
+ uint32_t acg_phyclk2gfxclk_b;
+ uint32_t acg_phyclk2gfxclk_c;
+};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 0a94f749e3c0..0214f63f52fc 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -50,6 +50,7 @@ enum cgs_ind_reg {
CGS_IND_REG__UVD_CTX,
CGS_IND_REG__DIDT,
CGS_IND_REG_GC_CAC,
+ CGS_IND_REG_SE_CAC,
CGS_IND_REG__AUDIO_ENDPT
};
@@ -406,6 +407,8 @@ typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
+typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_alloc_gpu_mem_t alloc_gpu_mem;
@@ -441,6 +444,7 @@ struct cgs_ops {
cgs_query_system_info query_system_info;
cgs_is_virtualization_enabled_t is_virtualization_enabled;
cgs_enter_safe_mode enter_safe_mode;
+ cgs_lock_grbm_idx lock_grbm_idx;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -517,4 +521,6 @@ struct cgs_device
#define cgs_enter_safe_mode(cgs_device, en) \
CGS_CALL(enter_safe_mode, cgs_device, en)
+#define cgs_lock_grbm_idx(cgs_device, lock) \
+ CGS_CALL(lock_grbm_idx, cgs_device, lock)
#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 36f376677a53..94277cb734d2 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -41,6 +41,11 @@ struct kgd_dev;
struct kgd_mem;
+enum kfd_preempt_type {
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+};
+
enum kgd_memory_pool {
KGD_POOL_SYSTEM_CACHEABLE = 1,
KGD_POOL_SYSTEM_WRITECOMBINE = 2,
@@ -82,6 +87,17 @@ struct kgd2kfd_shared_resources {
size_t doorbell_start_offset;
};
+struct tile_config {
+ uint32_t *tile_config_ptr;
+ uint32_t *macro_tile_config_ptr;
+ uint32_t num_tile_configs;
+ uint32_t num_macro_tile_configs;
+
+ uint32_t gb_addr_config;
+ uint32_t num_banks;
+ uint32_t num_ranks;
+};
+
/**
* struct kfd2kgd_calls
*
@@ -123,6 +139,11 @@ struct kgd2kfd_shared_resources {
*
* @get_fw_version: Returns FW versions from the header
*
+ * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
+ * Only used for no cp scheduling mode
+ *
+ * @get_tile_config: Returns GPU-specific tiling mode information
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -153,14 +174,16 @@ struct kfd2kgd_calls {
int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
- int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+ int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
@@ -192,6 +215,9 @@ struct kfd2kgd_calls {
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
+ void (*set_scratch_backing_va)(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+ int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
};
/**
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index ca93b5160ba6..3e606a761d0e 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -419,8 +419,8 @@ struct vi_mqd_allocation {
struct vi_mqd mqd;
uint32_t wptr_poll_mem;
uint32_t rptr_report_mem;
- uint32_t dyamic_cu_mask;
- uint32_t dyamic_rb_mask;
+ uint32_t dynamic_cu_mask;
+ uint32_t dynamic_rb_mask;
};
struct cz_mqd {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0b74da3dca8b..bc839ff0bdd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMax));
+
return 0;
}
@@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk) {
- cz_hwmgr->sclk_dpm.soft_max_clk =
- cz_hwmgr->sclk_dpm.soft_min_clk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ return 0;
+}
+
+static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ sclk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ sclk,
PPSMC_MSG_SetSclkSoftMax));
+ return 0;
+}
+
+static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ int32_t tmp_sclk;
+ int32_t count;
+
+ tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
+
+ for (count = table->count-1; count >= 0; count--) {
+ if (tmp_sclk >= table->entries[count].clk) {
+ tmp_sclk = table->entries[count].clk;
+ *sclk = tmp_sclk;
+ break;
+ }
}
+ if (count < 0)
+ *sclk = table->entries[0].clk;
return 0;
}
@@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ uint32_t sclk = 0;
int ret = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
ret = cz_phm_force_dpm_lowest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = cz_get_profiling_clk(hwmgr, &sclk);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ cz_phm_force_dpm_sclk(hwmgr, sclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- hwmgr->dpm_level = level;
-
return ret;
}
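cz_get_profiling_clk() above implements the PROFILE_STANDARD level: start from 70% of the top entry in the vddc-on-sclk dependency table, snap down to the nearest real level, and fall back to the lowest entry. The same walk in isolation, with the table layout simplified to just the clock column:

	#include <stdint.h>

	struct clk_entry { uint32_t clk; };

	/* Simplified stand-in for phm_clock_voltage_dependency_table. */
	uint32_t pick_profiling_clk(const struct clk_entry *entries, int count)
	{
		int32_t target = entries[count - 1].clk * 70 / 100;
		int i;

		for (i = count - 1; i >= 0; i--)
			if (target >= (int32_t)entries[i].clk)
				return entries[i].clk;	/* nearest level at or below 70% */

		return entries[0].clk;			/* fallback: lowest level */
	}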
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d025653c7823..9547f265a8bb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
return vddci_table->entries[i].value;
}
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
+ pr_debug("vddci is larger than max value in vddci_table\n");
+ return vddci_table->entries[i-1].value;
}
int phm_find_boot_level(void *table,
@@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk)
{
- uint8_t entryId;
- uint8_t voltageId;
+ uint8_t entry_id;
+ uint8_t voltage_id;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
+ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
+ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+ return -EINVAL;
+ }
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
+ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 720d5006ff62..c062844b15f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
}
} else if (voltage_mode == VOLTAGE_OBJ_SVID2) {
voltage_table->psi1_enable =
- voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1;
+ (voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5;
voltage_table->psi0_enable =
voltage_object->svid2_voltage_obj.psi0_enable & 0x1;
voltage_table->max_vid_step =
@@ -276,7 +276,10 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_avfs_parameters *param)
{
uint16_t idx;
+ uint8_t format_revision, content_revision;
+
struct atom_asic_profiling_info_v4_1 *profile;
+ struct atom_asic_profiling_info_v4_2 *profile_v4_2;
idx = GetIndexIntoMasterDataTable(asic_profiling_info);
profile = (struct atom_asic_profiling_info_v4_1 *)
@@ -286,76 +289,172 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
if (!profile)
return -1;
- param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
- param->ulMinVddc = le32_to_cpu(profile->minvddc);
- param->ulMeanNsigmaAcontant0 =
- le32_to_cpu(profile->avfs_meannsigma_acontant0);
- param->ulMeanNsigmaAcontant1 =
- le32_to_cpu(profile->avfs_meannsigma_acontant1);
- param->ulMeanNsigmaAcontant2 =
- le32_to_cpu(profile->avfs_meannsigma_acontant2);
- param->usMeanNsigmaDcTolSigma =
- le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
- param->usMeanNsigmaPlatformMean =
- le16_to_cpu(profile->avfs_meannsigma_platform_mean);
- param->usMeanNsigmaPlatformSigma =
- le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
- param->ulGbVdroopTableCksoffA0 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
- param->ulGbVdroopTableCksoffA1 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
- param->ulGbVdroopTableCksoffA2 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
- param->ulGbVdroopTableCksonA0 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
- param->ulGbVdroopTableCksonA1 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
- param->ulGbVdroopTableCksonA2 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
- param->ulGbFuseTableCksoffM1 =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
- param->ulGbFuseTableCksoffM2 =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
- param->ulGbFuseTableCksoffB =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
- param->ulGbFuseTableCksonM1 =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
- param->ulGbFuseTableCksonM2 =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
- param->ulGbFuseTableCksonB =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
-
- param->ucEnableGbVdroopTableCkson =
- profile->enable_gb_vdroop_table_ckson;
- param->ucEnableGbFuseTableCkson =
- profile->enable_gb_fuse_table_ckson;
- param->usPsmAgeComfactor =
- le16_to_cpu(profile->psm_age_comfactor);
-
- param->ulDispclk2GfxclkM1 =
- le32_to_cpu(profile->dispclk2gfxclk_a);
- param->ulDispclk2GfxclkM2 =
- le32_to_cpu(profile->dispclk2gfxclk_b);
- param->ulDispclk2GfxclkB =
- le32_to_cpu(profile->dispclk2gfxclk_c);
- param->ulDcefclk2GfxclkM1 =
- le32_to_cpu(profile->dcefclk2gfxclk_a);
- param->ulDcefclk2GfxclkM2 =
- le32_to_cpu(profile->dcefclk2gfxclk_b);
- param->ulDcefclk2GfxclkB =
- le32_to_cpu(profile->dcefclk2gfxclk_c);
- param->ulPixelclk2GfxclkM1 =
- le32_to_cpu(profile->pixclk2gfxclk_a);
- param->ulPixelclk2GfxclkM2 =
- le32_to_cpu(profile->pixclk2gfxclk_b);
- param->ulPixelclk2GfxclkB =
- le32_to_cpu(profile->pixclk2gfxclk_c);
- param->ulPhyclk2GfxclkM1 =
- le32_to_cpu(profile->phyclk2gfxclk_a);
- param->ulPhyclk2GfxclkM2 =
- le32_to_cpu(profile->phyclk2gfxclk_b);
- param->ulPhyclk2GfxclkB =
- le32_to_cpu(profile->phyclk2gfxclk_c);
+ format_revision = ((struct atom_common_table_header *)profile)->format_revision;
+ content_revision = ((struct atom_common_table_header *)profile)->content_revision;
+
+ if (format_revision == 4 && content_revision == 1) {
+ param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
+ param->ulMinVddc = le32_to_cpu(profile->minvddc);
+ param->ulMeanNsigmaAcontant0 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant0);
+ param->ulMeanNsigmaAcontant1 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant1);
+ param->ulMeanNsigmaAcontant2 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant2);
+ param->usMeanNsigmaDcTolSigma =
+ le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
+ param->usMeanNsigmaPlatformMean =
+ le16_to_cpu(profile->avfs_meannsigma_platform_mean);
+ param->usMeanNsigmaPlatformSigma =
+ le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
+ param->ulGbVdroopTableCksoffA0 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
+ param->ulGbVdroopTableCksoffA1 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
+ param->ulGbVdroopTableCksoffA2 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
+ param->ulGbVdroopTableCksonA0 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
+ param->ulGbVdroopTableCksonA1 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
+ param->ulGbVdroopTableCksonA2 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
+ param->ulGbFuseTableCksoffM1 =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
+ param->ulGbFuseTableCksoffM2 =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
+ param->ulGbFuseTableCksoffB =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
+ param->ulGbFuseTableCksonM1 =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
+ param->ulGbFuseTableCksonM2 =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
+ param->ulGbFuseTableCksonB =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
+
+ param->ucEnableGbVdroopTableCkson =
+ profile->enable_gb_vdroop_table_ckson;
+ param->ucEnableGbFuseTableCkson =
+ profile->enable_gb_fuse_table_ckson;
+ param->usPsmAgeComfactor =
+ le16_to_cpu(profile->psm_age_comfactor);
+
+ param->ulDispclk2GfxclkM1 =
+ le32_to_cpu(profile->dispclk2gfxclk_a);
+ param->ulDispclk2GfxclkM2 =
+ le32_to_cpu(profile->dispclk2gfxclk_b);
+ param->ulDispclk2GfxclkB =
+ le32_to_cpu(profile->dispclk2gfxclk_c);
+ param->ulDcefclk2GfxclkM1 =
+ le32_to_cpu(profile->dcefclk2gfxclk_a);
+ param->ulDcefclk2GfxclkM2 =
+ le32_to_cpu(profile->dcefclk2gfxclk_b);
+ param->ulDcefclk2GfxclkB =
+ le32_to_cpu(profile->dcefclk2gfxclk_c);
+ param->ulPixelclk2GfxclkM1 =
+ le32_to_cpu(profile->pixclk2gfxclk_a);
+ param->ulPixelclk2GfxclkM2 =
+ le32_to_cpu(profile->pixclk2gfxclk_b);
+ param->ulPixelclk2GfxclkB =
+ le32_to_cpu(profile->pixclk2gfxclk_c);
+ param->ulPhyclk2GfxclkM1 =
+ le32_to_cpu(profile->phyclk2gfxclk_a);
+ param->ulPhyclk2GfxclkM2 =
+ le32_to_cpu(profile->phyclk2gfxclk_b);
+ param->ulPhyclk2GfxclkB =
+ le32_to_cpu(profile->phyclk2gfxclk_c);
+ param->ulAcgGbVdroopTableA0 = 0;
+ param->ulAcgGbVdroopTableA1 = 0;
+ param->ulAcgGbVdroopTableA2 = 0;
+ param->ulAcgGbFuseTableM1 = 0;
+ param->ulAcgGbFuseTableM2 = 0;
+ param->ulAcgGbFuseTableB = 0;
+ param->ucAcgEnableGbVdroopTable = 0;
+ param->ucAcgEnableGbFuseTable = 0;
+ } else if (format_revision == 4 && content_revision == 2) {
+ profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile;
+ param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc);
+ param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc);
+ param->ulMeanNsigmaAcontant0 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0);
+ param->ulMeanNsigmaAcontant1 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1);
+ param->ulMeanNsigmaAcontant2 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2);
+ param->usMeanNsigmaDcTolSigma =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma);
+ param->usMeanNsigmaPlatformMean =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean);
+ param->usMeanNsigmaPlatformSigma =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma);
+ param->ulGbVdroopTableCksoffA0 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0);
+ param->ulGbVdroopTableCksoffA1 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1);
+ param->ulGbVdroopTableCksoffA2 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2);
+ param->ulGbVdroopTableCksonA0 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0);
+ param->ulGbVdroopTableCksonA1 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1);
+ param->ulGbVdroopTableCksonA2 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2);
+ param->ulGbFuseTableCksoffM1 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1);
+ param->ulGbFuseTableCksoffM2 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2);
+ param->ulGbFuseTableCksoffB =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b);
+ param->ulGbFuseTableCksonM1 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1);
+ param->ulGbFuseTableCksonM2 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2);
+ param->ulGbFuseTableCksonB =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b);
+
+ param->ucEnableGbVdroopTableCkson =
+ profile_v4_2->enable_gb_vdroop_table_ckson;
+ param->ucEnableGbFuseTableCkson =
+ profile_v4_2->enable_gb_fuse_table_ckson;
+ param->usPsmAgeComfactor =
+ le16_to_cpu(profile_v4_2->psm_age_comfactor);
+
+ param->ulDispclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_a);
+ param->ulDispclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_b);
+ param->ulDispclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_c);
+ param->ulDcefclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a);
+ param->ulDcefclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b);
+ param->ulDcefclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c);
+ param->ulPixelclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_a);
+ param->ulPixelclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_b);
+ param->ulPixelclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_c);
+ param->ulPhyclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_a);
+ param->ulPhyclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_b);
+ param->ulPhyclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_c);
+ param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0);
+ param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1);
+ param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2);
+ param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1);
+ param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2);
+ param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b);
+ param->ucAcgEnableGbVdroopTable = le32_to_cpu(profile_v4_2->enable_acg_gb_vdroop_table);
+ param->ucAcgEnableGbFuseTable = le32_to_cpu(profile_v4_2->enable_acg_gb_fuse_table);
+ } else {
+ pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n");
+ return -EINVAL;
+ }
return 0;
}
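Both branches above key off the shared ATOM table header, so the revision check happens before any layout-specific field is touched. Here is a compilable sketch of that dispatch pattern; the reduced structs and parse_profile are hypothetical stand-ins for the atomfirmware.h layouts.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduced layouts; the real ones live in atomfirmware.h. */
struct atom_common_table_header {
	uint16_t structuresize;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

struct profile_v4_1 { struct atom_common_table_header hdr; uint32_t maxvddc; };
struct profile_v4_2 { struct atom_common_table_header hdr; uint32_t maxvddc; uint32_t acg_a0; };

/* Read the shared header first; only then cast to the revision-specific
 * layout, and reject revisions we do not know. */
static int parse_profile(const void *blob, uint32_t *maxvddc)
{
	const struct atom_common_table_header *hdr = blob;

	if (hdr->format_revision == 4 && hdr->content_revision == 1)
		*maxvddc = ((const struct profile_v4_1 *)blob)->maxvddc;
	else if (hdr->format_revision == 4 && hdr->content_revision == 2)
		*maxvddc = ((const struct profile_v4_2 *)blob)->maxvddc;
	else
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct profile_v4_2 p = { { sizeof(p), 4, 2 }, 1200, 0 };
	uint32_t v;

	if (!parse_profile(&p, &v))
		printf("maxvddc = %u\n", v);	/* prints "maxvddc = 1200" */
	return 0;
}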
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 81908b5cfd5f..8e6b1f0ddebc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -109,6 +109,14 @@ struct pp_atomfwctrl_avfs_parameters {
uint32_t ulPhyclk2GfxclkM1;
uint32_t ulPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkB;
+ uint32_t ulAcgGbVdroopTableA0;
+ uint32_t ulAcgGbVdroopTableA1;
+ uint32_t ulAcgGbVdroopTableA2;
+ uint32_t ulAcgGbFuseTableM1;
+ uint32_t ulAcgGbFuseTableM2;
+ uint32_t ulAcgGbFuseTableB;
+ uint32_t ucAcgEnableGbVdroopTable;
+ uint32_t ucAcgEnableGbFuseTable;
};
struct pp_atomfwctrl_gpio_parameters {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 4c7f430b36eb..2c3e6baf2524 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
}
} */
+ if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
+ ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
+ rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
+ rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinVcn,
+ (rv_data->vclk_soft_min << 16) | rv_data->dclk_soft_min);
+ }
+
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
@@ -308,8 +317,8 @@ static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input,
}
static const struct phm_master_table_item rv_set_power_state_list[] = {
- { NULL, rv_tf_set_clock_limit },
- { NULL, rv_tf_set_num_active_display },
+ { .tableFunction = rv_tf_set_clock_limit },
+ { .tableFunction = rv_tf_set_num_active_display },
{ }
};
@@ -382,7 +391,7 @@ static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr,
}
static const struct phm_master_table_item rv_disable_dpm_list[] = {
- {NULL, rv_tf_disable_gfx_off},
+ { .tableFunction = rv_tf_disable_gfx_off },
{ },
};
@@ -407,7 +416,7 @@ static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr,
}
static const struct phm_master_table_item rv_enable_dpm_list[] = {
- {NULL, rv_tf_enable_gfx_off},
+ { .tableFunction = rv_tf_enable_gfx_off },
{ },
};
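The rv_hwmgr.c hunks above swap positional initializers for designated ones, which keeps the function pointer bound to the named field even if the struct gains or reorders members. A small self-contained sketch of why that matters; master_table_item and do_thing are illustrative, loosely modeled on phm_master_table_item.

#include <stddef.h>
#include <stdio.h>

struct pp_hwmgr;	/* opaque here */

typedef int (*phm_table_fn)(struct pp_hwmgr *hwmgr, void *input,
			    void *output, void *storage, int result);

/* Loosely modeled on phm_master_table_item; field names are assumptions. */
struct master_table_item {
	int (*isFunctionNeeded)(struct pp_hwmgr *hwmgr);	/* usually NULL */
	phm_table_fn tableFunction;
};

static int do_thing(struct pp_hwmgr *h, void *in, void *out, void *st, int r)
{
	(void)h; (void)in; (void)out; (void)st; (void)r;
	return 0;
}

/* Positional init ("{ NULL, do_thing }") ties the pointer to field order;
 * a designated init names the field, so struct reordering cannot silently
 * bind the function to the wrong slot. */
static const struct master_table_item list[] = {
	{ .tableFunction = do_thing },
	{ NULL }			/* zero-filled terminator */
};

int main(void)
{
	printf("%d entries before terminator\n",
	       (int)(sizeof(list) / sizeof(list[0]) - 1));	/* prints 1 */
	return 0;
}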
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index afb852295a15..2472b50e54cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -280,6 +280,8 @@ struct rv_hwmgr {
uint32_t f_actual_hard_min_freq;
uint32_t fabric_actual_soft_min_freq;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
bool vcn_power_gated;
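The PPSMC_MSG_SetSoftMinVcn argument above packs both soft minimums into the single 32-bit word an SMC message carries: vclk in the upper half, dclk in the lower. A runnable sketch of that packing; the helper name and the clock values are purely illustrative.

#include <assert.h>
#include <stdint.h>

/* Toy helper: vclk soft minimum in the high 16 bits, dclk in the low 16.
 * The name and unit handling are assumptions for illustration. */
static uint32_t pack_vcn_soft_min(uint32_t vclk, uint32_t dclk)
{
	return ((vclk & 0xffff) << 16) | (dclk & 0xffff);
}

int main(void)
{
	uint32_t arg = pack_vcn_soft_min(600, 500);

	assert((arg >> 16) == 600);	/* vclk half */
	assert((arg & 0xffff) == 500);	/* dclk half */
	return 0;
}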
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 1f01020ce3a9..c2743233ba10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
break;
default:
- PP_ASSERT_WITH_CODE(0,
- "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
- );
break;
}
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
@@ -4630,6 +4627,15 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ if (smu_data == NULL)
+ return -EINVAL;
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
if (enable) {
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 197174e562d2..f8f02e70b8bc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -78,6 +78,8 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask);
const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
@@ -146,6 +148,19 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.vr1hot_enabled = 1;
data->registry_data.regulator_hot_gpio_support = 1;
+ data->registry_data.didt_support = 1;
+ if (data->registry_data.didt_support) {
+ data->registry_data.didt_mode = 6;
+ data->registry_data.sq_ramping_support = 1;
+ data->registry_data.db_ramping_support = 0;
+ data->registry_data.td_ramping_support = 0;
+ data->registry_data.tcp_ramping_support = 0;
+ data->registry_data.dbr_ramping_support = 0;
+ data->registry_data.edc_didt_support = 1;
+ data->registry_data.gc_didt_support = 0;
+ data->registry_data.psm_didt_support = 0;
+ }
+
data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
@@ -223,6 +238,8 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtSupport);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
@@ -230,6 +247,34 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtEDCEnable);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GCEDC);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PSM);
+
+ if (data->registry_data.didt_support) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
+ if (data->registry_data.sq_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
+ if (data->registry_data.db_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
+ if (data->registry_data.td_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
+ if (data->registry_data.tcp_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
+ if (data->registry_data.dbr_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
+ if (data->registry_data.edc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
+ if (data->registry_data.gc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
+ if (data->registry_data.psm_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
+ }
if (data->registry_data.power_containment_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -321,8 +366,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
FEATURE_LED_DISPLAY_BIT;
data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
FEATURE_FAN_CONTROL_BIT;
- data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
- FEATURE_VOLTAGE_CONTROLLER_BIT;
+ data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
+ data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
if (!data->registry_data.prefetcher_dpm_key_disabled)
data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
@@ -386,6 +431,15 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
+ vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+ /* ACG firmware has major version 5 */
+ if ((data->smu_version & 0xff000000) == 0x5000000)
+ data->smu_features[GNLD_ACG].supported = true;
+
+ if (data->registry_data.didt_support)
+ data->smu_features[GNLD_DIDT].supported = true;
+
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -1504,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
*/
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
- uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
+ uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
+ uint32_t *acg_freq)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1555,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
cpu_to_le16(dividers.usPll_ss_slew_frac);
current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
+ *acg_freq = gfx_clock / 100; /* 10 kHz units to MHz */
+
return 0;
}
@@ -1635,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[i].value,
- &(pp_table->GfxclkLevel[i]));
+ &(pp_table->GfxclkLevel[i]),
+ &(pp_table->AcgFreqTable[i]));
if (result)
return result;
}
@@ -1644,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
while (i < NUM_GFXCLK_DPM_LEVELS) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[j].value,
- &(pp_table->GfxclkLevel[i]));
+ &(pp_table->GfxclkLevel[i]),
+ &(pp_table->AcgFreqTable[i]));
if (result)
return result;
i++;
@@ -2222,6 +2281,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
+
+ pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
+ pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
+ pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
+ pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
+ pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
+ pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
+
+ pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
+ pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
+ pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
+ pp_table->AcgAvfsGb.m1_shift = 0;
+ pp_table->AcgAvfsGb.m2_shift = 0;
+ pp_table->AcgAvfsGb.b_shift = 0;
+
} else {
data->smu_features[GNLD_AVFS].supported = false;
}
@@ -2230,6 +2304,55 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t acg_btc_response;
+
+ if (data->smu_features[GNLD_ACG].supported) {
+ if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+ data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
+ vega10_read_arg_from_smc(hwmgr->smumgr, &acg_btc_response);
+
+ if (1 == acg_btc_response) {
+ if (1 == data->acg_loop_state)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+ else if (2 == data->acg_loop_state)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
+ if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap))
+ data->smu_features[GNLD_ACG].enabled = true;
+ } else {
+ pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
+ data->smu_features[GNLD_ACG].enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_ACG].supported) {
+ if (data->smu_features[GNLD_ACG].enabled) {
+ if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap))
+ data->smu_features[GNLD_ACG].enabled = false;
+ }
+ }
+
+ return 0;
+}
+
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
@@ -2404,6 +2527,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
+ data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
+ data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
+
if (data->registry_data.ulv_support &&
table_info->us_ulv_voltage_offset) {
result = vega10_populate_ulv_state(hwmgr);
@@ -2500,7 +2626,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
result = vega10_avfs_enable(hwmgr, true);
PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
-
+ vega10_acg_enable(hwmgr);
vega10_save_default_power_profile(hwmgr);
return 0;
@@ -2832,6 +2958,11 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to start DPM!", result = tmp_result);
+ /* enable didt, do not abort if failed didt */
+ tmp_result = vega10_enable_didt_config(hwmgr);
+ PP_ASSERT(!tmp_result,
+ "Failed to enable didt config!");
+
tmp_result = vega10_enable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable power containment!",
@@ -3578,10 +3709,22 @@ static void vega10_apply_dal_minimum_voltage_request(
return;
}
+static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
+
+ return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
+}
+
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t socclk_idx;
vega10_apply_dal_minimum_voltage_request(hwmgr);
@@ -3602,13 +3745,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
+ if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level),
- "Failed to set soft min mclk index!",
- return -EINVAL);
-
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinSocclkByIndex,
+ socclk_idx),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ } else {
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinUclkByIndex,
+ data->smc_state_table.mem_boot_level),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ }
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
}
@@ -4015,7 +4167,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
} else {
- pr_info("Cannot find requested DCEFCLK!");
+ pr_debug("Cannot find requested DCEFCLK!");
}
if (min_clocks.memoryClock != 0) {
@@ -4097,34 +4249,30 @@ static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
+static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- int ret = 0;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = vega10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = vega10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = vega10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
+ *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
+ *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
}
- hwmgr->dpm_level = level;
-
- return ret;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+ *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ }
+ return 0;
}
static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
@@ -4151,6 +4299,86 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
return result;
}
+static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask = 0;
+ uint32_t mclk_mask = 0;
+ uint32_t soc_mask = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = vega10_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = vega10_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = vega10_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ vega10_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+ vega10_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+
+ return 0;
+}
+
static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4396,7 +4624,9 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int i;
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
@@ -4661,6 +4891,10 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable power containment!", result = tmp_result);
+ tmp_result = vega10_disable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable didt config!", result = tmp_result);
+
tmp_result = vega10_avfs_enable(hwmgr, false);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable AVFS!", result = tmp_result);
@@ -4677,6 +4911,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable ulv!", result = tmp_result);
+ tmp_result = vega10_acg_disable(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable acg!", result = tmp_result);
return result;
}
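The reworked vega10_dpm_force_dpm_level above treats dpm_level as a bitmask, so "is this any profiling level?" becomes a single AND against a combined mask instead of a chain of equality tests. A self-contained sketch with hypothetical flag values standing in for the AMD_DPM_FORCED_LEVEL_* bits.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical values mirroring the AMD_DPM_FORCED_LEVEL_* bit flags. */
enum {
	LEVEL_AUTO		= 1 << 0,
	LEVEL_LOW		= 1 << 1,
	LEVEL_HIGH		= 1 << 2,
	LEVEL_MANUAL		= 1 << 3,
	LEVEL_PROFILE_STANDARD	= 1 << 4,
	LEVEL_PROFILE_MIN_SCLK	= 1 << 5,
	LEVEL_PROFILE_MIN_MCLK	= 1 << 6,
	LEVEL_PROFILE_PEAK	= 1 << 7,
};

#define PROFILE_MODE_MASK (LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | \
			   LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK)

/* Because each level is a distinct bit, one AND answers the question
 * for all four profiling levels at once. */
static bool is_profile_level(unsigned int level)
{
	return (level & PROFILE_MODE_MASK) != 0;
}

int main(void)
{
	printf("%d %d\n", is_profile_level(LEVEL_PROFILE_PEAK),
	       is_profile_level(LEVEL_AUTO));	/* prints "1 0" */
	return 0;
}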
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 6e5c5b99593b..676cd7735883 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -64,7 +64,9 @@ enum {
GNLD_FW_CTF,
GNLD_LED_DISPLAY,
GNLD_FAN_CONTROL,
- GNLD_VOLTAGE_CONTROLLER,
+ GNLD_FEATURE_FAST_PPT_BIT,
+ GNLD_DIDT,
+ GNLD_ACG,
GNLD_FEATURES_MAX
};
@@ -230,7 +232,9 @@ struct vega10_registry_data {
uint8_t cac_support;
uint8_t clock_stretcher_support;
uint8_t db_ramping_support;
+ uint8_t didt_mode;
uint8_t didt_support;
+ uint8_t edc_didt_support;
uint8_t dynamic_state_patching_support;
uint8_t enable_pkg_pwr_tracking_feature;
uint8_t enable_tdc_limit_feature;
@@ -263,6 +267,9 @@ struct vega10_registry_data {
uint8_t tcp_ramping_support;
uint8_t tdc_support;
uint8_t td_ramping_support;
+ uint8_t dbr_ramping_support;
+ uint8_t gc_didt_support;
+ uint8_t psm_didt_support;
uint8_t thermal_out_gpio_support;
uint8_t thermal_support;
uint8_t fw_ctf_enabled;
@@ -381,6 +388,8 @@ struct vega10_hwmgr {
struct vega10_smc_state_table smc_state_table;
uint32_t config_telemetry;
+ uint32_t smu_version;
+ uint32_t acg_loop_state;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
@@ -425,6 +434,10 @@ struct vega10_hwmgr {
#define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
#define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define VEGA10_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA10_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA10_UMD_PSTATE_MCLK_LEVEL 0x2
+
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
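The long DIDT tables added below are {offset, mask, shift, value} tuples terminated by a 0xFFFFFFFF sentinel; a driver typically applies such a table with one read-modify-write per entry. A toy, runnable sketch of that walker follows; fake_regs and the accessors are stand-ins for the GC indirect register interface, not the driver's API.

#include <stdint.h>
#include <stdio.h>

struct didt_config_reg { uint32_t offset, mask, shift, value; };
#define CONFIGREG_END 0xFFFFFFFFu

static uint32_t fake_regs[16];	/* toy register file standing in for GC MMIO */

static uint32_t reg_read(uint32_t off)              { return fake_regs[off]; }
static void     reg_write(uint32_t off, uint32_t v) { fake_regs[off] = v; }

/* Walk the table to the end-of-list sentinel, doing a read-modify-write
 * per entry: clear the field, then OR in the shifted value. */
static void apply_config_regs(const struct didt_config_reg *tbl)
{
	for (; tbl->offset != CONFIGREG_END; tbl++) {
		uint32_t data = reg_read(tbl->offset);

		data &= ~tbl->mask;
		data |= (tbl->value << tbl->shift) & tbl->mask;
		reg_write(tbl->offset, data);
	}
}

int main(void)
{
	static const struct didt_config_reg demo[] = {
		{ 2, 0x00ff0000, 16, 0x3d },	/* set bits 23:16 of reg 2 */
		{ CONFIGREG_END }
	};

	apply_config_regs(demo);
	printf("0x%08x\n", fake_regs[2]);	/* prints "0x003d0000" */
	return 0;
}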
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 3f72268e99bb..e7fa67063cdc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -26,7 +26,1298 @@
#include "vega10_powertune.h"
#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
+#include "vega10_inc.h"
#include "pp_debug.h"
+#include "pp_soc15.h"
+
+static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /*DIDT_SQ_CTRL3 */
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TCP_CTRL3 */
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TD_CTRL3 */
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_DB_CTRL3 */
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
+ { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
+ { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
+ { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
+ { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
+ { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
+ { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
+ { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ_STALL_PATTERN_1_2 */
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_3_4 */
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_5_6 */
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_SQ_STALL_PATTERN_7 */
+ { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_1_2 */
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_3_4 */
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_5_6 */
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_7 */
+ { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_1_2 */
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_3_4 */
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_5_6 */
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_7 */
+ { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_1_2 */
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_3_4 */
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_5_6 */
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_7 */
+ { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
+ /* TD */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
+ /* TCP */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
+ /* DB */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
+ { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ /* TD */
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL PATTERNs */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL DELAYs */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12__SHIFT, 0x0000 },
+	{ ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC THRESHOLD */
+ { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{ mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
+ { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
+ { 0x16A06, 0x00000001, 0x0, 0x02000000 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
+ { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
+ { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
+ { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
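+/*
+ * Apply a table of register settings.  Each entry is a read-modify-write:
+ * the field selected by (mask, shift) is cleared and replaced, i.e.
+ * data = (data & ~mask) | ((value << shift) & mask), so unrelated bits in
+ * the register are preserved.  A row with offset 0xFFFFFFFF ends the table.
+ */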
+static int vega10_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs, enum vega10_didt_config_reg_type reg_type)
+{
+ uint32_t data;
+
+ PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega10_program_didt_config_registers] Invalid config register table!", return -EINVAL);
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ switch (reg_type) {
+ case VEGA10_CONFIGREG_DIDT:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+ break;
+ case VEGA10_CONFIGREG_GCCAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+ break;
+ case VEGA10_CONFIGREG_SECAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config_regs++;
+ }
+
+ return 0;
+}
+
+static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs)
+{
+ uint32_t data;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ data = cgs_read_register(hwmgr->device, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_register(hwmgr->device, config_regs->offset, data);
+ config_regs++;
+ }
+
+ return 0;
+}
+
+static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
+{
+ uint32_t data;
+ int result;
+ uint32_t en = (enable ? 1 : 0);
+ uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
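+	/*
+	 * Each PHM_PlatformCaps_*Ramping capability gates one DIDT block
+	 * (SQ/DB/TD/TCP/DBR).  The per-block enable bit is programmed into
+	 * that block's CTRL0 register and, for SQ/DB/TD/TCP, mirrored into
+	 * didt_block_info for the SMC message sent below.
+	 */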
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ didt_block_info &= ~SQ_Enable_MASK;
+ didt_block_info |= en << SQ_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ didt_block_info &= ~DB_Enable_MASK;
+ didt_block_info |= en << DB_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ didt_block_info &= ~TD_Enable_MASK;
+ didt_block_info |= en << TD_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ didt_block_info &= ~TCP_Enable_MASK;
+ didt_block_info |= en << TCP_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
+ data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+ }
+
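+	/*
+	 * With DiDt EDC enabled, the matching per-block EDC_CTRL is updated
+	 * as well: EDC_EN follows the enable flag while EDC_SW_RST is driven
+	 * with its complement, so a disabled EDC block is held in soft reset.
+	 */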
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
+ data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
+ data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
+ data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
+ data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
+ data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
+ }
+ }
+
+ if (enable) {
+ /* For Vega10, SMC does not support any mask yet. */
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
+ }
+}
+
+static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
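+	/*
+	 * Program each shader engine individually: GRBM_GFX_INDEX selects one
+	 * SE while broadcasting to all SHs and instances within it.  Writing
+	 * 0xE0000000 (SE/SH/instance broadcast) restores the default routing
+	 * afterwards.
+	 */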
+ for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SELCacConfig_Vega10, VEGA10_CONFIGREG_SECAC);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0;
+ uint32_t count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+		result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+ int result;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
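+	/*
+	 * GRBM_GFX_INDEX was just left in full broadcast mode, so the
+	 * force-stall pattern and control values below reach every shader
+	 * engine in one pass instead of per-SE.
+	 */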
+ result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ if (0 != result)
+ return result;
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = vega10_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
+
+ return 0;
+}
+
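+/*
+ * registry_data.didt_mode selects the throttling scheme:
+ *   0     - CAC-driving SE DiDt
+ *   2     - PSM GC DiDt
+ *   3     - SE EDC
+ *   1/4/5 - PSM GC EDC
+ *   6     - SE EDC force stall
+ */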
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega10_enable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega10_enable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega10_enable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega10_enable_psm_gc_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 1/4/5 Failed!", return result);
+ break;
+ case 6:
+ result = vega10_enable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ if (0 == result) {
+ PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = true;
+ }
+ }
+
+ return result;
+}
+
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (!data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega10_disable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega10_disable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega10_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega10_disable_psm_gc_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 1/4/5 Failed!", return result);
+ break;
+ case 6:
+ result = vega10_disable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ if (0 == result) {
+			PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = false;
+ }
+ }
+
+ return result;
+}
void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
index 9ecaa27c0bb5..b95771ab89cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
@@ -31,6 +31,12 @@ enum vega10_pt_config_reg_type {
VEGA10_CONFIGREG_MAX
};
+enum vega10_didt_config_reg_type {
+ VEGA10_CONFIGREG_DIDT = 0,
+ VEGA10_CONFIGREG_GCCAC,
+ VEGA10_CONFIGREG_SECAC
+};
+
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
@@ -44,6 +50,13 @@ struct vega10_pt_config_reg {
enum vega10_pt_config_reg_type type;
};
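+/*
+ * One row of the DIDT config tables (Offset/Mask/Shift/Value); a row with
+ * offset == 0xFFFFFFFF terminates a table.
+ */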
+struct vega10_didt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+};
+
struct vega10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
@@ -62,5 +75,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr);
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr);
+
#endif /* _VEGA10_POWERTUNE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 1623644ea49a..e343df190375 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -31,6 +31,8 @@
#include "cgs_common.h"
#include "vega10_pptable.h"
+#define NUM_DSPCLK_LEVELS 8
+
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
{
@@ -644,11 +646,11 @@ static int get_gfxclk_voltage_dependency_table(
return 0;
}
-static int get_dcefclk_voltage_dependency_table(
+static int get_pix_clk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table
**pp_vega10_clk_dep_table,
- const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+ const ATOM_Vega10_PIXCLK_Dependency_Table *clk_dep_table)
{
uint32_t table_size, i;
struct phm_ppt_v1_clock_voltage_dependency_table
@@ -681,6 +683,76 @@ static int get_dcefclk_voltage_dependency_table(
return 0;
}
+static int get_dcefclk_voltage_dependency_table(
+ struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table
+ **pp_vega10_clk_dep_table,
+ const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+{
+ uint32_t table_size, i;
+ uint8_t num_entries;
+ struct phm_ppt_v1_clock_voltage_dependency_table
+ *clk_table;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+ uint32_t rev_id;
+
+ PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
+ "Invalid PowerPlay Table!", return -1);
+
+ /*
+  * Workaround needed to add another DPM level for pioneer cards,
+  * as the VBIOS is locked down. This DPM level was added to
+  * support 3DPM monitors @ 4K120Hz.
+  */
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ rev_id = (uint32_t)sys_info.value;
+
+ if (dev_id == 0x6863 && rev_id == 0 &&
+ clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000)
+ num_entries = clk_dep_table->ucNumEntries + 1 > NUM_DSPCLK_LEVELS ?
+ NUM_DSPCLK_LEVELS : clk_dep_table->ucNumEntries + 1;
+ else
+ num_entries = clk_dep_table->ucNumEntries;
+
+ table_size = sizeof(uint32_t) +
+ sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
+ num_entries;
+
+ clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
+
+ if (!clk_table)
+ return -ENOMEM;
+
+ clk_table->count = (uint32_t)num_entries;
+
+ for (i = 0; i < clk_dep_table->ucNumEntries; i++) {
+ clk_table->entries[i].vddInd =
+ clk_dep_table->entries[i].ucVddInd;
+ clk_table->entries[i].clk =
+ le32_to_cpu(clk_dep_table->entries[i].ulClk);
+ }
+
+ if (i < num_entries) {
+ clk_table->entries[i].vddInd = clk_dep_table->entries[i-1].ucVddInd;
+ clk_table->entries[i].clk = 90000;
+ }
+
+ *pp_vega10_clk_dep_table = clk_table;
+
+ return 0;
+}
+
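
The workaround above only ever grows the table by one level and never past its capacity: num_entries is clamped to NUM_DSPCLK_LEVELS (8). A runnable sketch of just that clamp, under the same constants:

#include <stdint.h>
#include <stdio.h>

#define NUM_DSPCLK_LEVELS 8

/* clamp the extra workaround level to the table capacity */
static uint8_t clamped_entries(uint8_t vbios_entries, int needs_extra_level)
{
	if (!needs_extra_level)
		return vbios_entries;
	return (vbios_entries + 1 > NUM_DSPCLK_LEVELS) ?
			NUM_DSPCLK_LEVELS : vbios_entries + 1;
}

int main(void)
{
	printf("%d\n", clamped_entries(4, 1));	/* 5: room for the extra level */
	printf("%d\n", clamped_entries(8, 1));	/* 8: table already full */
	printf("%d\n", clamped_entries(4, 0));	/* 4: workaround not triggered */
	return 0;
}
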
static int get_pcie_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_pcie_table **vega10_pcie_table,
const Vega10_PPTable_Generic_SubTable_Header *table)
@@ -862,21 +934,21 @@ static int init_powerplay_extended_tables(
gfxclk_dep_table);
if (!result && powerplay_table->usPixclkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_pixclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table*)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table*)
pixclk_dep_table);
if (!result && powerplay_table->usPhyClkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_phyclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table *)
phyclk_dep_table);
if (!result && powerplay_table->usDispClkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_dispclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table *)
dispclk_dep_table);
if (!result && powerplay_table->usDcefclkDependencyTableOffset)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index e7ab8eb8a0cf..d44243441d28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_set_static_mode(hwmgr,
- FDO_PWM_MODE_STATIC);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = vega10_fan_ctrl_set_default_mode(hwmgr);
@@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a1ebe1014492..a4c8b09b6f14 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -164,9 +164,14 @@ enum phm_platform_caps {
PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */
PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */
PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */
+ PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */
PHM_PlatformCaps_DBRamping, /* for dI/dT feature */
PHM_PlatformCaps_TDRamping, /* for dI/dT feature */
PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */
+ PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */
+ PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */
+ PHM_PlatformCaps_GCEDC, /* for dI/dT feature */
+ PHM_PlatformCaps_PSM, /* for dI/dT feature */
PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */
PHM_PlatformCaps_FPS, /* FPS support */
PHM_PlatformCaps_ACP, /* ACP support */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 47e57bd2c36f..91b0105e8240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -128,6 +128,8 @@ struct phm_uvd_arbiter {
uint32_t dclk;
uint32_t vclk_ceiling;
uint32_t dclk_ceiling;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
};
struct phm_vce_arbiter {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index f3f9ebb631a5..822cd8b5bf90 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -42,6 +42,12 @@
} \
} while (0)
+#define PP_ASSERT(cond, msg) \
+ do { \
+ if (!(cond)) { \
+ pr_warn("%s\n", msg); \
+ } \
+ } while (0)
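
The new PP_ASSERT() differs from the existing PP_ASSERT_WITH_CODE() only in that it warns and carries on instead of running recovery code. A hypothetical call site for contrast (assuming pp_debug.h is included and `ret` holds a 0-on-success status):

static int setup_example(int ret)
{
	/* warn-only: log the failure and keep going */
	PP_ASSERT(0 == ret, "optional feature setup failed");

	/* warn-and-recover: log the failure, then bail out of the caller */
	PP_ASSERT_WITH_CODE(0 == ret, "mandatory setup failed", return ret);

	return 0;
}
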
#define PP_DBG_LOG(fmt, ...) \
do { \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
index 227d999b6bd1..a511611ec7e0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
@@ -41,6 +41,8 @@ inline static uint32_t soc15_get_register_offset(
reg = MP1_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == DF_HWID)
reg = DF_BASE.instance[inst].segment[segment] + offset;
+ else if (hw_id == GC_HWID)
+ reg = GC_BASE.instance[inst].segment[segment] + offset;
return reg;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index e0e106f1b23a..901c960cfe21 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -66,7 +66,12 @@
#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22
#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24
-#define PPSMC_Message_Count 0x25
+#define PPSMC_MSG_ForcePowerDownGfx 0x25
+#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
+#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
+#define PPSMC_MSG_SetSoftMinVcn 0x28
+#define PPSMC_Message_Count 0x29
+
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
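
Every message ID above has to stay below PPSMC_Message_Count; the new high ID, PPSMC_MSG_SetSoftMinVcn at 0x28, sits just under the bumped count of 0x29. Purely as an illustration, not part of the patch, that invariant could be pinned at compile time:

/* illustrative guard, not in the patch; in-kernel code would use
 * BUILD_BUG_ON() rather than C11 _Static_assert().
 */
_Static_assert(PPSMC_MSG_SetSoftMinVcn < PPSMC_Message_Count,
	       "rv_ppsmc: message ID exceeds PPSMC_Message_Count");
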
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
index 9ef2490c7c2e..550ed675027a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
@@ -55,9 +55,9 @@
#define FEATURE_FW_CTF_BIT 23
#define FEATURE_LED_DISPLAY_BIT 24
#define FEATURE_FAN_CONTROL_BIT 25
-#define FEATURE_VOLTAGE_CONTROLLER_BIT 26
-#define FEATURE_SPARE_27_BIT 27
-#define FEATURE_SPARE_28_BIT 28
+#define FEATURE_FAST_PPT_BIT 26
+#define FEATURE_GFX_EDC_BIT 27
+#define FEATURE_ACG_BIT 28
#define FEATURE_SPARE_29_BIT 29
#define FEATURE_SPARE_30_BIT 30
#define FEATURE_SPARE_31_BIT 31
@@ -90,9 +90,10 @@
#define FFEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FFEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT )
#define FFEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
-#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT )
-#define FFEATURE_SPARE_27_MASK (1 << FEATURE_SPARE_27_BIT )
-#define FFEATURE_SPARE_28_MASK (1 << FEATURE_SPARE_28_BIT )
+
+#define FEATURE_FAST_PPT_MASK (1 << FEATURE_FAST_PPT_BIT )
+#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
+#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
#define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index 532186b6f941..2818c98ff5ca 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -312,10 +312,15 @@ typedef struct {
PllSetting_t GfxBoostState;
- uint32_t Reserved[14];
+ uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS];
+ GbVdroopTable_t AcgBtcGbVdroopTable;
+ QuadraticInt_t AcgAvfsGb;
+
+ /* ACG Frequency Table, in Mhz */
+ uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS];
/* Padding - ignore */
- uint32_t MmHubPadding[7]; /* SMU internal use */
+ uint32_t MmHubPadding[3]; /* SMU internal use */
} PPTable_t;
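
PPTable_t is an ABI shared with the SMU firmware: Reserved[14] is replaced by the new ACG fields and MmHubPadding drops from 7 to 3 words, presumably so the overall table size stays in step with the firmware's view of it. A hedged sketch of how such a layout could be pinned at compile time (PPTABLE_EXPECTED_SIZE is a made-up placeholder, not a symbol from smu9_driver_if.h):

/* hypothetical guard: PPTABLE_EXPECTED_SIZE is a placeholder; in-kernel
 * code would use BUILD_BUG_ON() with the firmware-agreed size.
 */
_Static_assert(sizeof(PPTable_t) == PPTABLE_EXPECTED_SIZE,
	       "PPTable_t layout drifted from the SMU9 firmware ABI");
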
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 976e942ec694..5d61cc9d4554 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,7 @@ struct pp_smumgr_func {
bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+ bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
};
struct pp_smumgr {
@@ -202,6 +203,8 @@ extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
+
#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index b4af9e85dfa5..cb070ebc7de1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,6 +124,10 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
+#define PPSMC_MSG_RunAcgBtc 0x5C
+#define PPSMC_MSG_RunAcgInClosedLoop 0x5D
+#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
+#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
#define PPSMC_Message_Count 0x62
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 6a320b27aefd..8712f093d6d9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2129,6 +2129,25 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
+
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ return 0;
+
+ ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* mark BTC as completed so later calls do not resend EnableAvfs needlessly */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
index 0e9e1f2d7238..d9c72d992e30 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -48,5 +48,6 @@ int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index a1cb78552cf6..6ae948fc524f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -161,56 +161,47 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
- int i, result = -1;
+ int i;
+ int result = -EINVAL;
uint32_t reg, data;
- const PWR_Command_Table *virus = PwrVirusTable;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
- for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) {
- switch (virus->command) {
+ const PWR_Command_Table *pvirus = PwrVirusTable;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
+ switch (pvirus->command) {
case PwrCmdWrite:
- reg = virus->reg;
- data = virus->data;
+ reg = pvirus->reg;
+ data = pvirus->data;
cgs_write_register(smumgr->device, reg, data);
break;
+
case PwrCmdEnd:
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED;
result = 0;
break;
+
default:
- pr_err("Table Exit with Invalid Command!");
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
- result = -1;
+ pr_info("Table Exit with Invalid Command!");
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+ result = -EINVAL;
break;
}
- virus++;
+ pvirus++;
}
+
return result;
}
static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
- if (priv->avfs.AvfsBtcParam) {
- if (!smum_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
- if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
- result = 0;
- } else {
- pr_err("[AVFS][fiji_start_avfs_btc] Attempt"
- " to Enable AVFS Failed!");
- smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
- result = -1;
- }
- } else {
- pr_err("[AVFS][fiji_start_avfs_btc] "
- "PerformBTC SMU msg failed");
- result = -1;
+ if (0 != smu_data->avfs.avfs_btc_param) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
+ result = -EINVAL;
}
}
/* Soft-Reset to reset the engine before loading uCode */
@@ -224,42 +215,6 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
-{
- int result = 0;
- uint32_t table_start;
- uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq;
- uint16_t inversion_voltage;
-
- charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
- inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
-
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
- PmFuseTable), &table_start, 0x40000),
- "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
- "starting address of PmFuse structure",
- return -1;);
-
- charz_freq_addr = table_start +
- offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq);
- inversion_voltage_addr = table_start +
- offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
-
- result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
- (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
- PP_ASSERT_WITH_CODE(0 == result,
- "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
- "be populated.", return -1;);
-
- result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
- (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
- PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
- "charz_freq could not be populated.", return -1;);
-
- return result;
-}
-
static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
@@ -298,93 +253,41 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-/* Work in Progress */
-static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- return 0;
- } else
- return -EINVAL;
-}
-
-/* Work in Progress */
-static int fiji_save_vft_table(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- return 0;
- } else
- return -EINVAL;
-}
-
static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- switch (priv->avfs.AvfsBtcStatus) {
- case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */
- priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics "
- "Level table over to SMU",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- break;
- case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
- 0x666),
- "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
- "correctly to VftTableIsValid Msg",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
- PPSMC_MSG_EnableAvfs),
- "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
- "correctly to EnableAvfs Message Msg",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+ switch (smu_data->avfs.avfs_btc_status) {
+ case AVFS_BTC_COMPLETED_PREVIOUSLY:
break;
+
case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
if (!smu_started)
break;
- priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Failure at "
- "fiji_setup_pm_fuse_for_avfs",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED;
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
+ return -EINVAL;);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
+ return -EINVAL;);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Could not save VFT Table",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+ return -EINVAL;);
+
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
break;
case AVFS_BTC_DISABLED: /* Do nothing */
- break;
case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
+ case AVFS_BTC_ENABLEAVFS:
break;
default:
- pr_err("[AVFS] Something is broken. See log!");
+ pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
break;
}
return 0;
@@ -477,19 +380,6 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (smu7_init(smumgr))
return -EINVAL;
- fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
- if (fiji_is_hw_avfs_present(smumgr))
- /* AVFS Parameter
- * 0 - BTC DC disabled, BTC AC disabled
- * 1 - BTC DC enabled, BTC AC disabled
- * 2 - BTC DC disabled, BTC AC enabled
- * 3 - BTC DC enabled, BTC AC enabled
- * Default is 0 - BTC DC disabled, BTC AC disabled
- */
- fiji_priv->avfs.AvfsBtcParam = 0;
- else
- fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
-
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
fiji_priv->activity_target[i] = 30;
@@ -514,10 +404,12 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.init_smc_table = fiji_init_smc_table,
.update_sclk_threshold = fiji_update_sclk_threshold,
.thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+ .thermal_avfs_enable = fiji_thermal_avfs_enable,
.populate_all_graphic_levels = fiji_populate_all_graphic_levels,
.populate_all_memory_levels = fiji_populate_all_memory_levels,
.get_mac_definition = fiji_get_mac_definition,
.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
.is_dpm_running = fiji_is_dpm_running,
.populate_requested_graphic_levels = fiji_populate_requested_graphic_levels,
+ .is_hw_avfs_present = fiji_is_hw_avfs_present,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index adcbdfb209be..175bf9f8ef9c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,17 +28,8 @@
#include "smu7_smumgr.h"
-
-struct fiji_smu_avfs {
- enum AVFS_BTC_STATUS AvfsBtcStatus;
- uint32_t AvfsBtcParam;
-};
-
-
struct fiji_smumgr {
struct smu7_smumgr smu7_data;
-
- struct fiji_smu_avfs avfs;
struct SMU73_Discrete_DpmTable smc_state_table;
struct SMU73_Discrete_Ulv ulv_setting;
struct SMU73_Discrete_PmFuses power_tune_table;
@@ -47,7 +38,5 @@ struct fiji_smumgr {
};
-
-
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index f68e759e8be2..99a00bd39256 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -1498,7 +1498,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return result;
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
@@ -1889,7 +1889,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 9616cedc139c..75f43dadc56b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -60,16 +60,14 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i;
- int result = -1;
+ int result = -EINVAL;
uint32_t reg, data;
const PWR_Command_Table *pvirus = pwr_virus_table;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
@@ -86,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
default:
pr_info("Table Exit with Invalid Command!");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -1;
+ result = -EINVAL;
break;
}
pvirus++;
@@ -98,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
static int polaris10_perform_btc(struct pp_smumgr *smumgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
if (0 != smu_data->avfs.avfs_btc_param) {
if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
@@ -172,10 +170,11 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
+
static int
polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -185,30 +184,31 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
- return -1);
+ "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+ return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
- return -1);
+ return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
- return -1);
-
+ return -EINVAL);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
break;
case AVFS_BTC_DISABLED:
+ case AVFS_BTC_ENABLEAVFS:
case AVFS_BTC_NOTSUPPORTED:
break;
default:
- pr_info("[AVFS] Something is broken. See log!");
+ pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
break;
}
@@ -376,11 +376,6 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu7_init(smumgr))
return -EINVAL;
- if (polaris10_is_hw_avfs_present(smumgr))
- smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
- else
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
@@ -410,4 +405,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.get_mac_definition = polaris10_get_mac_definition,
.is_dpm_running = polaris10_is_dpm_running,
.populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels,
+ .is_hw_avfs_present = polaris10_is_hw_avfs_present,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index 49ebf1d5a53c..5e19c24b0561 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -32,11 +32,6 @@
#define SMC_RAM_END 0x40000
-struct polaris10_avfs {
- enum AVFS_BTC_STATUS avfs_btc_status;
- uint32_t avfs_btc_param;
-};
-
struct polaris10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
@@ -51,8 +46,6 @@ struct polaris10_pt_defaults {
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};
-
-
struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
uint32_t trans_upper_frequency;
@@ -61,14 +54,13 @@ struct polaris10_range_table {
struct polaris10_smumgr {
struct smu7_smumgr smu7_data;
uint8_t protected_mode;
- struct polaris10_avfs avfs;
SMU74_Discrete_DpmTable smc_state_table;
struct SMU74_Discrete_Ulv ulv_setting;
struct SMU74_Discrete_PmFuses power_tune_table;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
const struct polaris10_pt_defaults *power_tune_defaults;
- uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
- uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+ uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 35ac27681415..c49a6f22002f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->num_register_entries = 0;
}
- if (fw_type == UCODE_ID_RLC_G)
+ if ((fw_type == UCODE_ID_RLC_G)
+ || (fw_type == UCODE_ID_CP_MEC))
entry->flags = 1;
else
entry->flags = 0;
@@ -540,7 +541,6 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
return result;
}
-
int smu7_init(struct pp_smumgr *smumgr)
{
struct smu7_smumgr *smu_data;
@@ -596,6 +596,11 @@ int smu7_init(struct pp_smumgr *smumgr)
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
+ if (smum_is_hw_avfs_present(smumgr))
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+ else
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 919be435b49c..ee5e32d2921e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,6 +37,11 @@ struct smu7_buffer_entry {
unsigned long handle;
};
+struct smu7_avfs {
+ enum AVFS_BTC_STATUS avfs_btc_status;
+ uint32_t avfs_btc_param;
+};
+
struct smu7_smumgr {
uint8_t *header;
uint8_t *mec_image;
@@ -50,7 +55,8 @@ struct smu7_smumgr {
uint32_t arb_table_start;
uint32_t ulv_setting_starts;
uint8_t security_hard_key;
- uint32_t acpi_optimization;
+ uint32_t acpi_optimization;
+ struct smu7_avfs avfs;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index bcc61ffd13cb..3bdf6478de7f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -43,7 +43,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
-
+MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
int smum_early_init(struct pp_instance *handle)
{
@@ -403,3 +404,11 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
return 0;
}
+
+bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ if (smumgr->smumgr_funcs->is_hw_avfs_present)
+ return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+
+ return false;
+}
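
smum_is_hw_avfs_present() is the usual optional-hook dispatch: call the backend's callback when it exists, otherwise fall back to a safe default (feature absent). A standalone model of the pattern, with illustrative names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy model of the optional-callback dispatch; the names are made up */
struct backend_funcs {
	bool (*is_hw_avfs_present)(void *ctx);
};

static bool dispatch_is_present(const struct backend_funcs *f, void *ctx)
{
	if (f->is_hw_avfs_present)
		return f->is_hw_avfs_present(ctx);
	return false;	/* safe default: treat the feature as absent */
}

static bool always_yes(void *ctx) { (void)ctx; return true; }

int main(void)
{
	struct backend_funcs with    = { .is_hw_avfs_present = always_yes };
	struct backend_funcs without = { 0 };

	printf("%d %d\n", dispatch_is_present(&with, NULL),
			dispatch_is_present(&without, NULL));	/* 1 0 */
	return 0;
}
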
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 269678443862..408514c965a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -356,6 +356,9 @@ int vega10_set_tools_address(struct pp_smumgr *smumgr)
static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
{
uint32_t smc_driver_if_version;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+ uint32_t rev_id;
PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetDriverIfVersion),
@@ -363,12 +366,27 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return -EINVAL);
vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
- return -EINVAL;
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(smumgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(smumgr->device, &sys_info);
+ rev_id = (uint32_t)sys_info.value;
+
+ if (!((dev_id == 0x687f) &&
+ ((rev_id == 0xc0) ||
+ (rev_id == 0xc1) ||
+ (rev_id == 0xc3)))) {
+ if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
+ pr_err("Your firmware(0x%x) doesn't match \
+ SMU9_DRIVER_IF_VERSION(0x%x). \
+ Please update your firmware!\n",
+ smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+ return -EINVAL;
+ }
}
return 0;
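
The new logic only relaxes the strict interface-version check for device 0x687f at revisions 0xc0, 0xc1 and 0xc3; everything else must still match SMU9_DRIVER_IF_VERSION exactly. The predicate reads more easily pulled out on its own (a refactoring sketch, not what the patch does):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true when the strict SMU9_DRIVER_IF_VERSION check is skipped */
static bool skip_if_version_check(uint32_t dev_id, uint32_t rev_id)
{
	return dev_id == 0x687f &&
	       (rev_id == 0xc0 || rev_id == 0xc1 || rev_id == 0xc3);
}

int main(void)
{
	printf("%d\n", skip_if_version_check(0x687f, 0xc1));	/* 1 */
	printf("%d\n", skip_if_version_check(0x687f, 0xc2));	/* 0 */
	return 0;
}
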
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index dbd4fd3a810b..8bd38102b58e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,16 +16,16 @@ TRACE_EVENT(amd_sched_job,
TP_ARGS(sched_job),
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
- __field(struct amd_sched_job *, sched_job)
__field(struct dma_fence *, fence)
__field(const char *, name)
+ __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
),
TP_fast_assign(
__entry->entity = sched_job->s_entity;
- __entry->sched_job = sched_job;
+ __entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
@@ -33,8 +33,9 @@ TRACE_EVENT(amd_sched_job,
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
- TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
__entry->job_count, __entry->hw_job_count)
);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..97c94f9683fa 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
struct amd_sched_rq *rq = entity->rq;
+ int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
-
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs
+ * queued IBs or discard them on SIGKILL
*/
- wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
-
+ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+ r = -ERESTARTSYS;
+ else
+ r = wait_event_killable(sched->job_scheduled,
+ amd_sched_entity_is_idle(entity));
amd_sched_rq_remove_entity(rq, entity);
+ if (r) {
+ struct amd_sched_job *job;
+
+ /* Park the scheduler's kernel thread for a moment to make sure
+ * it isn't processing our entity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
+ sched->ops->free_job(job);
+
+ }
}
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index ad9a95916f1f..16903dc7fe0d 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -64,6 +64,20 @@ static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
+static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ long rate, clk_rate = mode->clock * 1000;
+ long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
+
+ rate = clk_round_rate(arcpgu->clk, clk_rate);
+ if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
+ return MODE_OK;
+
+ return MODE_NOCLOCK;
+}
+
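
This replaces the old exact-match atomic_check: a mode is now accepted whenever the rate the clock framework can actually produce lands within plus or minus 0.5% of the requested pixel clock (clk_rate / 200). A worked check of that window, with a stub standing in for clk_round_rate():

#include <stdio.h>

/* stand-in for clk_round_rate(): pretend the PLL snaps to 25 kHz steps */
static long round_rate_stub(long hz)
{
	return (hz / 25000) * 25000;
}

/* the +-0.5% window from arc_pgu_crtc_mode_valid() */
static int mode_clock_ok(long requested_hz)
{
	long diff = requested_hz / 200;
	long rate = round_rate_stub(requested_hz);
	long delta = rate > requested_hz ? rate - requested_hz
					 : requested_hz - rate;

	return rate > 0 && delta < diff;
}

int main(void)
{
	/* 148.5 MHz (1080p60): the allowed window is +-742500 Hz */
	printf("%d\n", mode_clock_ok(148500000));	/* 1 */
	return 0;
}
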
static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -106,7 +120,8 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
}
-static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
+static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -116,7 +131,8 @@ static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
+static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -129,20 +145,6 @@ static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
~ARCPGU_CTRL_ENABLE_MASK);
}
-static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- struct drm_display_mode *mode = &state->adjusted_mode;
- long rate, clk_rate = mode->clock * 1000;
-
- rate = clk_round_rate(arcpgu->clk, clk_rate);
- if (rate != clk_rate)
- return -EINVAL;
-
- return 0;
-}
-
static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -158,15 +160,13 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
+ .mode_valid = arc_pgu_crtc_mode_valid,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_base = drm_helper_crtc_mode_set_base,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .enable = arc_pgu_crtc_enable,
- .disable = arc_pgu_crtc_disable,
- .prepare = arc_pgu_crtc_disable,
- .commit = arc_pgu_crtc_enable,
- .atomic_check = arc_pgu_crtc_atomic_check,
.atomic_begin = arc_pgu_crtc_atomic_begin,
+ .atomic_enable = arc_pgu_crtc_atomic_enable,
+ .atomic_disable = arc_pgu_crtc_atomic_disable,
};
static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
@@ -218,6 +218,7 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
formats, ARRAY_SIZE(formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 3e43a5d4fb09..289eda54e5aa 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
-static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
@@ -48,29 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
-static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- return 0;
-}
-
-static const struct file_operations arcpgu_drm_ops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = arcpgu_gem_mmap,
-};
+DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
static void arcpgu_lastclose(struct drm_device *drm)
{
@@ -142,7 +120,7 @@ static int arcpgu_load(struct drm_device *drm)
return -ENODEV;
}
- platform_set_drvdata(pdev, arcpgu);
+ platform_set_drvdata(pdev, drm);
return 0;
}
@@ -160,11 +138,37 @@ static int arcpgu_unload(struct drm_device *drm)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+ unsigned long clkrate = clk_get_rate(arcpgu->clk);
+ unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000;
+
+ seq_printf(m, "hw : %lu\n", clkrate);
+ seq_printf(m, "mode: %lu\n", mode_clock);
+ return 0;
+}
+
+static struct drm_info_list arcpgu_debugfs_list[] = {
+ { "clocks", arcpgu_show_pxlclock, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+static int arcpgu_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+}
+#endif
+
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
.lastclose = arcpgu_lastclose,
- .name = "drm-arcpgu",
+ .name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
.major = 1,
@@ -172,8 +176,6 @@ static struct drm_driver arcpgu_drm_driver = {
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -185,6 +187,9 @@ static struct drm_driver arcpgu_drm_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = arcpgu_debugfs_init,
+#endif
};
static int arcpgu_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index d67b6f15e8b8..72b22b805412 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -165,7 +165,8 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
}
-static void hdlcd_crtc_enable(struct drm_crtc *crtc)
+static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -175,7 +176,8 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void hdlcd_crtc_disable(struct drm_crtc *crtc)
+static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -218,10 +220,10 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .enable = hdlcd_crtc_enable,
- .disable = hdlcd_crtc_disable,
.atomic_check = hdlcd_crtc_atomic_check,
.atomic_begin = hdlcd_crtc_atomic_begin,
+ .atomic_enable = hdlcd_crtc_atomic_enable,
+ .atomic_disable = hdlcd_crtc_atomic_disable,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
@@ -313,6 +315,7 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
formats, ARRAY_SIZE(formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index d3da87fbd85a..f9bda7b0d2ec 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -253,8 +253,6 @@ static struct drm_driver hdlcd_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -343,7 +341,6 @@ err_register:
}
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
@@ -375,7 +372,6 @@ static void hdlcd_drm_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- drm_vblank_cleanup(drm);
pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
pm_runtime_put_sync(drm->dev);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 4bb38a21efec..3615d18a7ddf 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -46,7 +46,8 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_OK;
}
-static void malidp_crtc_enable(struct drm_crtc *crtc)
+static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -69,7 +70,8 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void malidp_crtc_disable(struct drm_crtc *crtc)
+static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -408,9 +410,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
.mode_valid = malidp_crtc_mode_valid,
- .enable = malidp_crtc_enable,
- .disable = malidp_crtc_disable,
.atomic_check = malidp_crtc_atomic_check,
+ .atomic_enable = malidp_crtc_atomic_enable,
+ .atomic_disable = malidp_crtc_atomic_disable,
};
static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 01b13d219917..1a57cc28955e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -225,7 +225,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
malidp_atomic_commit_se_config(crtc, old_crtc_state);
@@ -331,8 +331,6 @@ static struct drm_driver malidp_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 600fa7bd7f52..94e7e3fa3408 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -128,7 +128,6 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = malidp_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -398,7 +397,7 @@ int malidp_de_planes_init(struct drm_device *drm)
DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats,
- n, plane_type, NULL);
+ n, NULL, plane_type, NULL);
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 4fe19fde84f9..2a4d163ac76f 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -334,16 +334,6 @@ static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
-void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
- int idx)
-{
-}
-
-void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- int idx)
-{
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
@@ -1150,13 +1140,13 @@ int armada_drm_plane_init(struct armada_plane *plane)
return 0;
}
-static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
{ CSC_AUTO, "Auto" },
{ CSC_YUV_CCIR601, "CCIR601" },
{ CSC_YUV_CCIR709, "CCIR709" },
};
-static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
{ CSC_AUTO, "Auto" },
{ CSC_RGB_COMPUTER, "Computer system" },
{ CSC_RGB_STUDIO, "Studio" },
@@ -1269,6 +1259,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
&armada_primary_plane_funcs,
armada_primary_formats,
ARRAY_SIZE(armada_primary_formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
@@ -1329,8 +1320,7 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
port = of_get_child_by_name(parent, "port");
of_node_put(np);
if (!port) {
- dev_err(dev, "no port node found in %s\n",
- parent->full_name);
+ dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
@@ -1364,7 +1354,7 @@ static int armada_lcd_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id armada_lcd_of_match[] = {
+static const struct of_device_id armada_lcd_of_match[] = {
{
.compatible = "marvell,dove-lcd",
.data = &armada510_ops,
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 7e8906d3ae26..bab11f483575 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -102,8 +102,6 @@ struct armada_crtc {
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
-void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
-void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e618fab7f519..0b3227c039d7 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -232,8 +232,8 @@ static void armada_add_endpoints(struct device *dev,
of_node_put(remote);
continue;
} else if (!of_device_is_available(remote->parent)) {
- dev_warn(dev, "parent device of %s is not available\n",
- remote->full_name);
+ dev_warn(dev, "parent device of %pOF is not available\n",
+ remote);
of_node_put(remote);
continue;
}
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 602dfead8eef..29c7d047b152 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -81,7 +81,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
info->par = fbh;
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -118,8 +117,6 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
}
static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
- .gamma_set = armada_drm_crtc_gamma_set,
- .gamma_get = armada_drm_crtc_gamma_get,
.fb_probe = armada_fb_probe,
};
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e9a29df4b443..edc44910d79f 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -388,7 +388,7 @@ static const uint32_t armada_ovl_formats[] = {
DRM_FORMAT_BGR565,
};
-static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
{ CKMODE_DISABLE, "disabled" },
{ CKMODE_Y, "Y component" },
{ CKMODE_U, "U component" },
@@ -460,6 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
+ NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(dplane);
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 76f07f38b941..749646ae365f 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -4,16 +4,11 @@
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
-int ast_load_dp501_microcode(struct drm_device *dev)
+static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- static char *fw_name = "ast_dp501_fw.bin";
- int err;
- err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
- if (err)
- return err;
- return 0;
+ return request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
}
static void send_ack(struct ast_private *ast)
@@ -187,7 +182,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
return false;
}
-bool ast_launch_m68k(struct drm_device *dev)
+static bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u32 i, data, len = 0;
@@ -201,7 +196,11 @@ bool ast_launch_m68k(struct drm_device *dev)
if (ast->dp501_fw_addr) {
fw_addr = ast->dp501_fw_addr;
len = 32*1024;
- } else if (ast->dp501_fw) {
+ } else {
+ if (!ast->dp501_fw &&
+ ast_load_dp501_microcode(dev) < 0)
+ return false;
+
fw_addr = (u8 *)ast->dp501_fw->data;
len = ast->dp501_fw->size;
}
@@ -432,3 +431,11 @@ void ast_init_3rdtx(struct drm_device *dev)
}
}
}
+
+void ast_release_firmware(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ release_firmware(ast->dp501_fw);
+ ast->dp501_fw = NULL;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fd7c9eec92e4..69dab82a3771 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -197,7 +197,6 @@ static struct drm_driver driver = {
.load = ast_driver_load,
.unload = ast_driver_unload,
- .set_busid = drm_pci_set_busid,
.fops = &ast_fops,
.name = DRIVER_NAME,
@@ -210,7 +209,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
@@ -221,11 +219,11 @@ static int __init ast_init(void)
if (ast_modeset == 0)
return -EINVAL;
- return drm_pci_init(&driver, &ast_pci_driver);
+ return pci_register_driver(&ast_pci_driver);
}
static void __exit ast_exit(void)
{
- drm_pci_exit(&driver, &ast_pci_driver);
+ pci_unregister_driver(&ast_pci_driver);
}
module_init(ast_init);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8880f0b62e9c..e6c4cd3dc50e 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -245,7 +245,6 @@ struct ast_connector {
struct ast_crtc {
struct drm_crtc base;
- u8 lut_r[256], lut_g[256], lut_b[256];
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
int cursor_width, cursor_height;
@@ -401,11 +400,10 @@ void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
/* ast dp501 */
-int ast_load_dp501_microcode(struct drm_device *dev);
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
-bool ast_launch_m68k(struct drm_device *dev);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+void ast_release_firmware(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 4ad4acd0ccab..0cd827e11fa2 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -231,7 +231,6 @@ static int astfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "astdrmfb");
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
@@ -255,27 +254,7 @@ out:
return ret;
}
-static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- ast_crtc->lut_r[regno] = red >> 8;
- ast_crtc->lut_g[regno] = green >> 8;
- ast_crtc->lut_b[regno] = blue >> 8;
-}
-
-static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- *red = ast_crtc->lut_r[regno] << 8;
- *green = ast_crtc->lut_g[regno] << 8;
- *blue = ast_crtc->lut_b[regno] << 8;
-}
-
static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
- .gamma_set = ast_fb_gamma_set,
- .gamma_get = ast_fb_gamma_get,
.fb_probe = astfb_create,
};
@@ -287,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
- drm_gem_object_unreference_unlocked(afb->obj);
+ drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
}
drm_fb_helper_fini(&afbdev->helper);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 262c2c0e43b4..dac355812adc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -387,9 +387,9 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_put_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
- kfree(fb);
+ kfree(ast_fb);
}
static const struct drm_framebuffer_funcs ast_fb_funcs = {
@@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev,
ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
if (!ast_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(ast_fb);
return ERR_PTR(ret);
}
@@ -576,6 +576,7 @@ void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
ast_fbdev_fini(dev);
@@ -627,7 +628,7 @@ int ast_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -675,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_ast_bo(obj);
*offset = ast_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index aaef0a652f10..6f3849ec0c1d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -63,15 +63,18 @@ static inline void ast_load_palette_index(struct ast_private *ast,
static void ast_crtc_load_lut(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ u16 *r, *g, *b;
int i;
if (!crtc->enabled)
return;
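+	/* gamma_store packs 256 16-bit entries per channel: red first, then green, then blue. */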
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
for (i = 0; i < 256; i++)
- ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
- ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+ ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
}
static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -613,7 +616,23 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
static void ast_crtc_disable(struct drm_crtc *crtc)
{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
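+	/* Push the scanout buffer back to system memory so its VRAM can be reclaimed while the CRTC is off. */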
+ if (crtc->primary->fb) {
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
+ struct drm_gem_object *obj = ast_fb->obj;
+ struct ast_bo *bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return;
+
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+ crtc->primary->fb = NULL;
}
static void ast_crtc_prepare(struct drm_crtc *crtc)
@@ -633,7 +652,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_set = ast_crtc_mode_set,
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
- .load_lut = ast_crtc_load_lut,
.prepare = ast_crtc_prepare,
.commit = ast_crtc_commit,
@@ -648,15 +666,6 @@ static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- ast_crtc->lut_r[i] = red[i] >> 8;
- ast_crtc->lut_g[i] = green[i] >> 8;
- ast_crtc->lut_b[i] = blue[i] >> 8;
- }
ast_crtc_load_lut(crtc);
return 0;
@@ -681,7 +690,6 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
struct ast_crtc *crtc;
- int i;
crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
if (!crtc)
@@ -690,12 +698,6 @@ static int ast_crtc_init(struct drm_device *dev)
drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
-
- for (i = 0; i < 256; i++) {
- crtc->lut_r[i] = i;
- crtc->lut_g[i] = i;
- crtc->lut_b[i] = i;
- }
return 0;
}
@@ -948,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ttm_bo_kunmap(&ast->cache_kmap);
- drm_gem_object_unreference_unlocked(ast->cursor_cache);
+ drm_gem_object_put_unlocked(ast->cursor_cache);
}
int ast_mode_init(struct drm_device *dev)
@@ -1213,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc,
ast_show_cursor(crtc);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 58084985e6cf..696a15dc2f3f 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,10 +323,8 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
return -ENOMEM;
ret = drm_gem_object_init(dev, &astbo->gem, size);
- if (ret) {
- kfree(astbo);
- return ret;
- }
+ if (ret)
+ goto error;
astbo->bo.bdev = &ast->ttm.bdev;
@@ -340,10 +338,13 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, NULL, ast_bo_ttm_destroy);
if (ret)
- return ret;
+ goto error;
*pastbo = astbo;
return 0;
+error:
+ kfree(astbo);
+ return ret;
}
static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
@@ -376,7 +377,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
int ast_bo_unpin(struct ast_bo *bo)
{
- int i, ret;
+ int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
return 0;
@@ -387,11 +388,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
+ return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
int ast_bo_push_sysram(struct ast_bo *bo)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 53489859997b..d73281095fac 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -149,7 +149,8 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
return atmel_hlcdc_dc_mode_valid(crtc->dc, mode);
}
-static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -183,7 +184,8 @@ static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
pm_runtime_put_sync(dev->dev);
}
-static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -235,7 +237,7 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
- for_each_connector_in_state(state->state, connector, cstate, i) {
+ for_each_new_connector_in_state(state->state, connector, cstate, i) {
struct drm_display_info *info = &connector->display_info;
unsigned int supported_fmts = 0;
int j;
@@ -319,11 +321,11 @@ static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
- .disable = atmel_hlcdc_crtc_disable,
- .enable = atmel_hlcdc_crtc_enable,
.atomic_check = atmel_hlcdc_crtc_atomic_check,
.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
+ .atomic_enable = atmel_hlcdc_crtc_atomic_enable,
+ .atomic_disable = atmel_hlcdc_crtc_atomic_disable,
};
static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
@@ -429,6 +431,7 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
@@ -484,6 +487,10 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
drm_crtc_vblank_reset(&crtc->base);
+ drm_mode_crtc_set_gamma_size(&crtc->base, ATMEL_HLCDC_CLUT_SIZE);
+ drm_crtc_enable_color_mgmt(&crtc->base, 0, false,
+ ATMEL_HLCDC_CLUT_SIZE);
+
dc->crtc = &crtc->base;
return 0;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 30dbffdb45a3..74d66e11f688 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -42,6 +42,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
.default_color = 3,
.general_config = 4,
},
+ .clut_offset = 0x400,
},
};
@@ -73,6 +74,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x400,
},
{
.name = "overlay1",
@@ -91,6 +93,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0x800,
},
{
.name = "high-end-overlay",
@@ -112,6 +115,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.scaler_config = 13,
.csc = 14,
},
+ .clut_offset = 0x1000,
},
{
.name = "cursor",
@@ -131,6 +135,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0x1400,
},
};
@@ -162,6 +167,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x600,
},
{
.name = "overlay1",
@@ -180,6 +186,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xa00,
},
{
.name = "overlay2",
@@ -198,6 +205,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xe00,
},
{
.name = "high-end-overlay",
@@ -223,6 +231,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
},
.csc = 14,
},
+ .clut_offset = 0x1200,
},
{
.name = "cursor",
@@ -244,6 +253,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.general_config = 9,
.scaler_config = 13,
},
+ .clut_offset = 0x1600,
},
};
@@ -275,6 +285,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x600,
},
{
.name = "overlay1",
@@ -293,6 +304,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xa00,
},
{
.name = "overlay2",
@@ -311,6 +323,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xe00,
},
{
.name = "high-end-overlay",
@@ -336,6 +349,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
},
.csc = 14,
},
+ .clut_offset = 0x1200,
},
};
@@ -451,8 +465,7 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- if (dc->fbdev)
- drm_fbdev_cma_hotplug_event(dc->fbdev);
+ drm_fbdev_cma_hotplug_event(dc->fbdev);
}
struct atmel_hlcdc_dc_commit {
@@ -526,14 +539,13 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
dc->commit.pending = true;
spin_unlock(&dc->commit.wait.lock);
- if (ret) {
- kfree(commit);
- goto error;
- }
+ if (ret)
+ goto err_free;
- /* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(state, true);
+ /* We have our own synchronization through the commit lock. */
+ BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
+ /* Swap state succeeded, this is the point of no return. */
drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
@@ -542,6 +554,8 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
return 0;
+err_free:
+ kfree(commit);
error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
@@ -747,8 +761,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index b0596a84c1b8..4237b0446721 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -88,6 +88,11 @@
#define ATMEL_HLCDC_YUV422SWP BIT(17)
#define ATMEL_HLCDC_DSCALEOPT BIT(20)
+#define ATMEL_HLCDC_C1_MODE ATMEL_HLCDC_CLUT_MODE(0)
+#define ATMEL_HLCDC_C2_MODE ATMEL_HLCDC_CLUT_MODE(1)
+#define ATMEL_HLCDC_C4_MODE ATMEL_HLCDC_CLUT_MODE(2)
+#define ATMEL_HLCDC_C8_MODE ATMEL_HLCDC_CLUT_MODE(3)
+
#define ATMEL_HLCDC_XRGB4444_MODE ATMEL_HLCDC_RGB_MODE(0)
#define ATMEL_HLCDC_ARGB4444_MODE ATMEL_HLCDC_RGB_MODE(1)
#define ATMEL_HLCDC_RGBA4444_MODE ATMEL_HLCDC_RGB_MODE(2)
@@ -142,6 +147,8 @@
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE BIT(2)
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN BIT(3)
+#define ATMEL_HLCDC_CLUT_SIZE 256
+
#define ATMEL_HLCDC_MAX_LAYERS 6
/**
@@ -259,6 +266,7 @@ struct atmel_hlcdc_layer_desc {
int id;
int regs_offset;
int cfgs_offset;
+ int clut_offset;
struct atmel_hlcdc_formats *formats;
struct atmel_hlcdc_layer_cfg_layout layout;
int max_width;
@@ -414,6 +422,14 @@ static inline u32 atmel_hlcdc_layer_read_cfg(struct atmel_hlcdc_layer *layer,
(cfgid * sizeof(u32)));
}
+static inline void atmel_hlcdc_layer_write_clut(struct atmel_hlcdc_layer *layer,
+ unsigned int c, u32 val)
+{
+ regmap_write(layer->regmap,
+ layer->desc->clut_offset + c * sizeof(u32),
+ val);
+}
+
static inline void atmel_hlcdc_layer_init(struct atmel_hlcdc_layer *layer,
const struct atmel_hlcdc_layer_desc *desc,
struct regmap *regmap)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 1124200bb280..703c2d13603f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -83,6 +83,7 @@ drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
#define SUBPIXEL_MASK 0xffff
static uint32_t rgb_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
@@ -100,6 +101,7 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = {
};
static uint32_t rgb_and_yuv_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
@@ -128,6 +130,9 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = {
static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
{
switch (format) {
+ case DRM_FORMAT_C8:
+ *mode = ATMEL_HLCDC_C8_MODE;
+ break;
case DRM_FORMAT_XRGB4444:
*mode = ATMEL_HLCDC_XRGB4444_MODE;
break;
@@ -424,6 +429,29 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+{
+ struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_color_lut *lut;
+ int idx;
+
+ if (!crtc || !crtc->state)
+ return;
+
+ if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
+ return;
+
+ lut = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+
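+	/* Gamma LUT entries are 16 bits per component; keep the top 8 bits of each and pack them as red[23:16], green[15:8], blue[7:0]. */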
+ for (idx = 0; idx < ATMEL_HLCDC_CLUT_SIZE; idx++, lut++) {
+ u32 val = ((lut->red << 8) & 0xff0000) |
+ (lut->green & 0xff00) |
+ (lut->blue >> 8);
+
+ atmel_hlcdc_layer_write_clut(&plane->layer, idx, val);
+ }
+}
+
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
@@ -768,6 +796,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
+ atmel_hlcdc_plane_update_clut(plane);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -809,7 +838,7 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
if (plane->base.fb)
- drm_framebuffer_unreference(plane->base.fb);
+ drm_framebuffer_put(plane->base.fb);
drm_plane_cleanup(p);
}
@@ -911,7 +940,7 @@ void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
desc->name);
}
-static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
+static const struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
.atomic_check = atmel_hlcdc_plane_atomic_check,
.atomic_update = atmel_hlcdc_plane_atomic_update,
.atomic_disable = atmel_hlcdc_plane_atomic_disable,
@@ -958,7 +987,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
if (state->base.fb)
- drm_framebuffer_unreference(state->base.fb);
+ drm_framebuffer_put(state->base.fb);
kfree(state);
p->state = NULL;
@@ -996,7 +1025,7 @@ atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
}
if (copy->base.fb)
- drm_framebuffer_reference(copy->base.fb);
+ drm_framebuffer_get(copy->base.fb);
return &copy->base;
}
@@ -1015,15 +1044,14 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
}
if (s->fb)
- drm_framebuffer_unreference(s->fb);
+ drm_framebuffer_put(s->fb);
kfree(state);
}
-static struct drm_plane_funcs layer_plane_funcs = {
+static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = atmel_hlcdc_plane_destroy,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
@@ -1058,7 +1086,8 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
ret = drm_universal_plane_init(dev, &plane->base, 0,
&layer_plane_funcs,
desc->formats->formats,
- desc->formats->nformats, type, NULL);
+ desc->formats->nformats,
+ NULL, type, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index aa342515ddf4..7b20318483e4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -84,7 +84,6 @@ static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = bochs_load,
.unload = bochs_unload,
- .set_busid = drm_pci_set_busid,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -94,7 +93,6 @@ static struct drm_driver bochs_driver = {
.gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
/* ---------------------------------------------------------------------- */
@@ -224,12 +222,12 @@ static int __init bochs_init(void)
if (bochs_modeset == 0)
return -EINVAL;
- return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+ return pci_register_driver(&bochs_pci_driver);
}
static void __exit bochs_exit(void)
{
- drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+ pci_unregister_driver(&bochs_pci_driver);
}
module_init(bochs_init);
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index c38deffa14de..14eb8d0d5a00 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -23,9 +23,9 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = bochsfb_mmap,
};
@@ -118,7 +118,6 @@ static int bochsfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "bochsdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &bochsfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f75ab6278113..b2431aee7887 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -785,8 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force)
return adv7511_detect(adv, connector);
}
-static struct drm_connector_funcs adv7511_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+static const struct drm_connector_funcs adv7511_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = adv7511_connector_detect,
.destroy = drm_connector_cleanup,
@@ -857,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
return ret;
}
-static struct drm_bridge_funcs adv7511_bridge_funcs = {
+static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.enable = adv7511_bridge_enable,
.disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
@@ -1126,11 +1125,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&adv7511->bridge);
- if (ret) {
- dev_err(dev, "failed to add adv7511 bridge\n");
- goto err_unregister_cec;
- }
+ drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 9006578b9789..9385eb0b1ee4 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1002,7 +1002,6 @@ static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
}
static const struct drm_connector_funcs anx78xx_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = anx78xx_detect,
.destroy = drm_connector_cleanup,
@@ -1097,7 +1096,8 @@ static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&anx78xx->lock);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode,
+ false);
if (err) {
DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
goto unlock;
@@ -1438,11 +1438,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
- err = drm_bridge_add(&anx78xx->bridge);
- if (err < 0) {
- DRM_ERROR("Failed to add drm bridge: %d\n", err);
- goto err_poweroff;
- }
+ drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 4c758ed51939..5dd3f1cd074a 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1005,7 +1005,6 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs analogix_dp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = analogix_dp_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 831a606c4706..de5e7dee7ad6 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -92,7 +92,6 @@ dumb_vga_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = dumb_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -177,7 +176,6 @@ static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
static int dumb_vga_probe(struct platform_device *pdev)
{
struct dumb_vga *vga;
- int ret;
vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
if (!vga)
@@ -186,7 +184,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(vga->vdd)) {
- ret = PTR_ERR(vga->vdd);
+ int ret = PTR_ERR(vga->vdd);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
vga->vdd = NULL;
@@ -207,11 +205,9 @@ static int dumb_vga_probe(struct platform_device *pdev)
vga->bridge.funcs = &dumb_vga_bridge_funcs;
vga->bridge.of_node = pdev->dev.of_node;
- ret = drm_bridge_add(&vga->bridge);
- if (ret && !IS_ERR(vga->ddc))
- i2c_put_adapter(vga->ddc);
+ drm_bridge_add(&vga->bridge);
- return ret;
+ return 0;
}
static int dumb_vga_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 11f11086a68f..7ccadba7c98c 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -193,7 +193,6 @@ static enum drm_connector_status ge_b850v3_lvds_detect(
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ge_b850v3_lvds_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 4f64e717e01b..d64a3283822a 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -238,7 +238,6 @@ static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs =
};
static const struct drm_connector_funcs ptn3460_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -332,11 +331,7 @@ static int ptn3460_probe(struct i2c_client *client,
ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&ptn_bridge->bridge);
- if (ret) {
- DRM_ERROR("Failed to add bridge\n");
- return ret;
- }
+ drm_bridge_add(&ptn_bridge->bridge);
i2c_set_clientdata(client, ptn_bridge);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 67fe19e5a9c6..e0cca19b4044 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -50,7 +50,6 @@ panel_bridge_connector_helper_funcs = {
};
static const struct drm_connector_funcs panel_bridge_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -158,7 +157,6 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type)
{
struct panel_bridge *panel_bridge;
- int ret;
if (!panel)
return ERR_PTR(-EINVAL);
@@ -176,9 +174,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
- ret = drm_bridge_add(&panel_bridge->bridge);
- if (ret)
- return ERR_PTR(ret);
+ drm_bridge_add(&panel_bridge->bridge);
return &panel_bridge->bridge;
}
@@ -198,3 +194,33 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
devm_kfree(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
+
+static void devm_drm_panel_bridge_release(struct device *dev, void *res)
+{
+ struct drm_bridge **bridge = res;
+
+ drm_panel_bridge_remove(*bridge);
+}
+
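+/**
+ * devm_drm_panel_bridge_add - resource-managed drm_panel_bridge_add()
+ * @dev: device that owns the resource
+ * @panel: panel to wrap in a &drm_bridge
+ * @connector_type: connector type to report for the panel
+ *
+ * Managed version of drm_panel_bridge_add(); drm_panel_bridge_remove()
+ * is called automatically when @dev is unbound.
+ */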
+struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
+{
+ struct drm_bridge **ptr, *bridge;
+
+ ptr = devres_alloc(devm_drm_panel_bridge_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = drm_panel_bridge_add(panel, connector_type);
+ if (!IS_ERR(bridge)) {
+ *ptr = bridge;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 6f22f9fec9bf..81198f5e9afa 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,7 +476,6 @@ static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
};
static const struct drm_connector_funcs ps8622_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -598,11 +597,7 @@ static int ps8622_probe(struct i2c_client *client,
ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&ps8622->bridge);
- if (ret) {
- DRM_ERROR("Failed to add bridge\n");
- return ret;
- }
+ drm_bridge_add(&ps8622->bridge);
i2c_set_clientdata(client, ps8622);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 9b87067c022c..b1ab4ab09532 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -124,7 +124,6 @@ sii902x_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs sii902x_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = sii902x_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -269,7 +268,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
if (ret)
return;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@@ -418,11 +417,7 @@ static int sii902x_probe(struct i2c_client *client,
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&sii902x->bridge);
- if (ret) {
- dev_err(dev, "Failed to add drm_bridge\n");
- return ret;
- }
+ drm_bridge_add(&sii902x->bridge);
i2c_set_clientdata(client, sii902x);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 2d51a2269fc6..5131bfb94f06 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
- u8 reg = msg->reg[0] & 0x7f;
+ u8 reg = msg->reg[1] & 0x7f;
- if (msg->reg[0] & 0x80)
+ if (msg->reg[1] & 0x80)
ctx->xdevcap[reg] = msg->ret;
else
ctx->devcap[reg] = msg->ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 53e78d092d18..3cc53b44186e 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -2,6 +2,7 @@ config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
select REGMAP_MMIO
+ select CEC_CORE if CEC_NOTIFIER
config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface"
@@ -22,3 +23,18 @@ config DRM_DW_HDMI_I2S_AUDIO
help
Support the I2S Audio interface which is part of the Synopsys
Designware HDMI block.
+
+config DRM_DW_HDMI_CEC
+ tristate "Synopsis Designware CEC interface"
+ depends on DRM_DW_HDMI
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ Support the CEC interface which is part of the Synopsys
+ Designware HDMI block.
+
+config DRM_DW_MIPI_DSI
+ tristate
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 17aa7a65b57e..5dad97d920be 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -3,3 +3,6 @@
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
+
+obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 8f2d1379c880..cf3f0caf9c63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, dw->buf_offset);
}
-static struct snd_pcm_ops snd_dw_hdmi_ops = {
+static const struct snd_pcm_ops snd_dw_hdmi_ops = {
.open = dw_hdmi_open,
.close = dw_hdmi_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
new file mode 100644
index 000000000000..6c323510f128
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -0,0 +1,327 @@
+/*
+ * Designware HDMI CEC driver
+ *
+ * Copyright (C) 2015-2017 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <drm/drm_edid.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#include "dw-hdmi-cec.h"
+
+enum {
+ HDMI_IH_CEC_STAT0 = 0x0106,
+ HDMI_IH_MUTE_CEC_STAT0 = 0x0186,
+
+ HDMI_CEC_CTRL = 0x7d00,
+ CEC_CTRL_START = BIT(0),
+ CEC_CTRL_FRAME_TYP = 3 << 1,
+ CEC_CTRL_RETRY = 0 << 1,
+ CEC_CTRL_NORMAL = 1 << 1,
+ CEC_CTRL_IMMED = 2 << 1,
+
+ HDMI_CEC_STAT = 0x7d01,
+ CEC_STAT_DONE = BIT(0),
+ CEC_STAT_EOM = BIT(1),
+ CEC_STAT_NACK = BIT(2),
+ CEC_STAT_ARBLOST = BIT(3),
+ CEC_STAT_ERROR_INIT = BIT(4),
+ CEC_STAT_ERROR_FOLL = BIT(5),
+ CEC_STAT_WAKEUP = BIT(6),
+
+ HDMI_CEC_MASK = 0x7d02,
+ HDMI_CEC_POLARITY = 0x7d03,
+ HDMI_CEC_INT = 0x7d04,
+ HDMI_CEC_ADDR_L = 0x7d05,
+ HDMI_CEC_ADDR_H = 0x7d06,
+ HDMI_CEC_TX_CNT = 0x7d07,
+ HDMI_CEC_RX_CNT = 0x7d08,
+ HDMI_CEC_TX_DATA0 = 0x7d10,
+ HDMI_CEC_RX_DATA0 = 0x7d20,
+ HDMI_CEC_LOCK = 0x7d30,
+ HDMI_CEC_WKUPCTRL = 0x7d31,
+};
+
+struct dw_hdmi_cec {
+ struct dw_hdmi *hdmi;
+ const struct dw_hdmi_cec_ops *ops;
+ u32 addresses;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+ unsigned int tx_status;
+ bool tx_done;
+ bool rx_done;
+ struct cec_notifier *notify;
+ int irq;
+};
+
+static void dw_hdmi_write(struct dw_hdmi_cec *cec, u8 val, int offset)
+{
+ cec->ops->write(cec->hdmi, val, offset);
+}
+
+static u8 dw_hdmi_read(struct dw_hdmi_cec *cec, int offset)
+{
+ return cec->ops->read(cec->hdmi, offset);
+}
+
+static int dw_hdmi_cec_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
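+	/* Logical address 15 is broadcast/unregistered; keep it set alongside any claimed address so broadcast traffic is always received. */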
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | BIT(15);
+
+ dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
+ dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);
+
+ return 0;
+}
+
+static int dw_hdmi_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+ unsigned int i, ctrl;
+
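+	/* Translate the requested signal free time into the controller's frame type; the hardware inserts the matching idle period before the transfer. */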
+ switch (signal_free_time) {
+ case CEC_SIGNAL_FREE_TIME_RETRY:
+ ctrl = CEC_CTRL_RETRY;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
+ default:
+ ctrl = CEC_CTRL_NORMAL;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
+ ctrl = CEC_CTRL_IMMED;
+ break;
+ }
+
+ for (i = 0; i < msg->len; i++)
+ dw_hdmi_write(cec, msg->msg[i], HDMI_CEC_TX_DATA0 + i);
+
+ dw_hdmi_write(cec, msg->len, HDMI_CEC_TX_CNT);
+ dw_hdmi_write(cec, ctrl | CEC_CTRL_START, HDMI_CEC_CTRL);
+
+ return 0;
+}
+
+static irqreturn_t dw_hdmi_cec_hardirq(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+ unsigned int stat = dw_hdmi_read(cec, HDMI_IH_CEC_STAT0);
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (stat == 0)
+ return IRQ_NONE;
+
+ dw_hdmi_write(cec, stat, HDMI_IH_CEC_STAT0);
+
+ if (stat & CEC_STAT_ERROR_INIT) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CEC_STAT_EOM) {
+ unsigned int len, i;
+
+ len = dw_hdmi_read(cec, HDMI_CEC_RX_CNT);
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < len; i++)
+ cec->rx_msg.msg[i] =
+ dw_hdmi_read(cec, HDMI_CEC_RX_DATA0 + i);
+
+ dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);
+
+ cec->rx_msg.len = len;
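+		/* Publish the message contents before setting rx_done; pairs with the smp_rmb() in dw_hdmi_cec_thread(). */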
+ smp_wmb();
+ cec->rx_done = true;
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t dw_hdmi_cec_thread(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ cec_transmit_attempt_done(adap, cec->tx_status);
+ }
+ if (cec->rx_done) {
+ cec->rx_done = false;
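+		/* Pairs with the smp_wmb() in dw_hdmi_cec_hardirq(): rx_msg must be read only after rx_done was seen set. */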
+ smp_rmb();
+ cec_received_msg(adap, &cec->rx_msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static int dw_hdmi_cec_enable(struct cec_adapter *adap, bool enable)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
+ if (!enable) {
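+		/* Mask and mute all CEC interrupts while the adapter is down. */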
+ dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
+
+ cec->ops->disable(cec->hdmi);
+ } else {
+ unsigned int irqs;
+
+ dw_hdmi_write(cec, 0, HDMI_CEC_CTRL);
+ dw_hdmi_write(cec, ~0, HDMI_IH_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);
+
+ dw_hdmi_cec_log_addr(cec->adap, CEC_LOG_ADDR_INVALID);
+
+ cec->ops->enable(cec->hdmi);
+
+ irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
+ CEC_STAT_DONE;
+ dw_hdmi_write(cec, irqs, HDMI_CEC_POLARITY);
+ dw_hdmi_write(cec, ~irqs, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~irqs, HDMI_IH_MUTE_CEC_STAT0);
+ }
+ return 0;
+}
+
+static const struct cec_adap_ops dw_hdmi_cec_ops = {
+ .adap_enable = dw_hdmi_cec_enable,
+ .adap_log_addr = dw_hdmi_cec_log_addr,
+ .adap_transmit = dw_hdmi_cec_transmit,
+};
+
+static void dw_hdmi_cec_del(void *data)
+{
+ struct dw_hdmi_cec *cec = data;
+
+ cec_delete_adapter(cec->adap);
+}
+
+static int dw_hdmi_cec_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev);
+ struct dw_hdmi_cec *cec;
+ int ret;
+
+ if (!data)
+ return -ENXIO;
+
+ /*
+ * Our device is just a convenience - we want to link to the real
+ * hardware device here, so that userspace can see the association
+ * between the HDMI hardware and its associated CEC chardev.
+ */
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->irq = data->irq;
+ cec->ops = data->ops;
+ cec->hdmi = data->hdmi;
+
+ platform_set_drvdata(pdev, cec);
+
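+	/* Quiesce the engine: no pending TX, all CEC interrupts masked and muted until the adapter is enabled. */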
+ dw_hdmi_write(cec, 0, HDMI_CEC_TX_CNT);
+ dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
+
+ cec->adap = cec_allocate_adapter(&dw_hdmi_cec_ops, cec, "dw_hdmi",
+ CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
+ CEC_CAP_RC | CEC_CAP_PASSTHROUGH,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ /* override the module pointer */
+ cec->adap->owner = THIS_MODULE;
+
+ ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
+ dw_hdmi_cec_hardirq,
+ dw_hdmi_cec_thread, IRQF_SHARED,
+ "dw-hdmi-cec", cec->adap);
+ if (ret < 0)
+ return ret;
+
+ cec->notify = cec_notifier_get(pdev->dev.parent);
+ if (!cec->notify)
+ return -ENOMEM;
+
+ ret = cec_register_adapter(cec->adap, pdev->dev.parent);
+ if (ret < 0) {
+ cec_notifier_put(cec->notify);
+ return ret;
+ }
+
+ /*
+ * CEC documentation says we must not call cec_delete_adapter
+ * after a successful call to cec_register_adapter().
+ */
+ devm_remove_action(&pdev->dev, dw_hdmi_cec_del, cec);
+
+ cec_register_cec_notifier(cec->adap, cec->notify);
+
+ return 0;
+}
+
+static int dw_hdmi_cec_remove(struct platform_device *pdev)
+{
+ struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notify);
+
+ return 0;
+}
+
+static struct platform_driver dw_hdmi_cec_driver = {
+ .probe = dw_hdmi_cec_probe,
+ .remove = dw_hdmi_cec_remove,
+ .driver = {
+ .name = "dw-hdmi-cec",
+ },
+};
+module_platform_driver(dw_hdmi_cec_driver);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
+MODULE_DESCRIPTION("Synopsys Designware HDMI CEC driver for i.MX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h
new file mode 100644
index 000000000000..cf4dc121a2c4
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h
@@ -0,0 +1,19 @@
+#ifndef DW_HDMI_CEC_H
+#define DW_HDMI_CEC_H
+
+struct dw_hdmi;
+
+struct dw_hdmi_cec_ops {
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
+ void (*enable)(struct dw_hdmi *hdmi);
+ void (*disable)(struct dw_hdmi *hdmi);
+};
+
+struct dw_hdmi_cec_data {
+ struct dw_hdmi *hdmi;
+ const struct dw_hdmi_cec_ops *ops;
+ int irq;
+};
+
+#endif
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index b2cf59f54c88..3b7e5c59a5e9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -1,7 +1,8 @@
/*
* dw-hdmi-i2s-audio.c
*
- * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (c) 2017 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ead11242c4b9..bf14214fa464 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -35,8 +35,12 @@
#include "dw-hdmi.h"
#include "dw-hdmi-audio.h"
+#include "dw-hdmi-cec.h"
+
+#include <media/cec-notifier.h>
#define DDC_SEGMENT_ADDR 0x30
+
#define HDMI_EDID_LEN 512
enum hdmi_datamap {
@@ -130,6 +134,7 @@ struct dw_hdmi {
unsigned int version;
struct platform_device *audio;
+ struct platform_device *cec;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
@@ -163,6 +168,7 @@ struct dw_hdmi {
bool bridge_is_on; /* indicates the bridge is on */
bool rxsense; /* rxsense state */
u8 phy_mask; /* desired phy int mask settings */
+ u8 mc_clkdis; /* clock disable register */
spinlock_t audio_lock;
struct mutex audio_mutex;
@@ -175,6 +181,8 @@ struct dw_hdmi {
struct regmap *regm;
void (*enable_audio)(struct dw_hdmi *hdmi);
void (*disable_audio)(struct dw_hdmi *hdmi);
+
+ struct cec_notifier *cec_notifier;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -546,8 +554,11 @@ EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
{
- hdmi_modb(hdmi, enable ? 0 : HDMI_MC_CLKDIS_AUDCLK_DISABLE,
- HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
+ if (enable)
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ else
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
}
static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
@@ -1317,7 +1328,7 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
u8 val;
/* Initialise info frame from DRM mode */
- drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
@@ -1569,8 +1580,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
/* HDMI Initialization Step B.4 */
static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
{
- u8 clkdis;
-
/* control period minimum duration */
hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
@@ -1582,17 +1591,21 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
/* Enable pixel clock and tmds data path */
- clkdis = 0x7F;
- clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE |
+ HDMI_MC_CLKDIS_CSCCLK_DISABLE |
+ HDMI_MC_CLKDIS_AUDCLK_DISABLE |
+ HDMI_MC_CLKDIS_PREPCLK_DISABLE |
+ HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
- clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
/* Enable csc path */
if (is_color_space_conversion(hdmi)) {
- clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
}
/* Enable color space conversion if needed */
@@ -1783,7 +1796,6 @@ static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK);
- hdmi_writeb(hdmi, 0xff, HDMI_CEC_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT);
hdmi_writeb(hdmi, 0xff, HDMI_I2CM_CTLINT);
@@ -1896,6 +1908,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
drm_mode_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
/* Store the ELD */
drm_edid_to_eld(connector, edid);
@@ -1920,7 +1933,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
}
static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
@@ -2119,11 +2131,16 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
* ask the source to re-read the EDID.
*/
if (intr_stat &
- (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD))
+ (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
__dw_hdmi_setup_rx_sense(hdmi,
phy_stat & HDMI_PHY_HPD,
phy_stat & HDMI_PHY_RX_SENSE);
+ if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
+ cec_notifier_set_phys_addr(hdmi->cec_notifier,
+ CEC_PHYS_ADDR_INVALID);
+ }
+
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
@@ -2170,6 +2187,7 @@ static const struct dw_hdmi_phy_data dw_hdmi_phys[] = {
.name = "DWC HDMI 2.0 TX PHY",
.gen = 2,
.has_svsret = true,
+ .configure = hdmi_phy_configure_dwc_hdmi_3d_tx,
}, {
.type = DW_HDMI_PHY_VENDOR_PHY,
.name = "Vendor PHY",
@@ -2219,6 +2237,29 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
return -ENODEV;
}
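+/*
+ * Clock-gate callbacks handed to the dw-hdmi-cec sub-device; mc_clkdis
+ * shadows HDMI_MC_CLKDIS so the other clock-disable bits are preserved.
+ */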
+static void dw_hdmi_cec_enable(struct dw_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->mutex);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ mutex_unlock(&hdmi->mutex);
+}
+
+static void dw_hdmi_cec_disable(struct dw_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->mutex);
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ mutex_unlock(&hdmi->mutex);
+}
+
+static const struct dw_hdmi_cec_ops dw_hdmi_cec_ops = {
+ .write = hdmi_writeb,
+ .read = hdmi_readb,
+ .enable = dw_hdmi_cec_enable,
+ .disable = dw_hdmi_cec_disable,
+};
+
static const struct regmap_config hdmi_regmap_8bit_config = {
.reg_bits = 32,
.val_bits = 8,
@@ -2241,6 +2282,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
+ struct dw_hdmi_cec_data cec;
struct dw_hdmi *hdmi;
struct resource *iores = NULL;
int irq;
@@ -2261,6 +2303,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->disabled = true;
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
+ hdmi->mc_clkdis = 0x7f;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
@@ -2376,6 +2419,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
if (ret)
goto err_iahb;
+ hdmi->cec_notifier = cec_notifier_get(dev);
+ if (!hdmi->cec_notifier) {
+ ret = -ENOMEM;
+ goto err_iahb;
+ }
+
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
* N and cts values before enabling phy
@@ -2438,6 +2487,19 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ if (config0 & HDMI_CONFIG0_CEC) {
+ cec.hdmi = hdmi;
+ cec.ops = &dw_hdmi_cec_ops;
+ cec.irq = irq;
+
+ pdevinfo.name = "dw-hdmi-cec";
+ pdevinfo.data = &cec;
+ pdevinfo.size_data = sizeof(cec);
+ pdevinfo.dma_mask = 0;
+
+ hdmi->cec = platform_device_register_full(&pdevinfo);
+ }
+
/* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
if (hdmi->i2c)
dw_hdmi_i2c_init(hdmi);
@@ -2452,6 +2514,9 @@ err_iahb:
hdmi->ddc = NULL;
}
+ if (hdmi->cec_notifier)
+ cec_notifier_put(hdmi->cec_notifier);
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
@@ -2465,10 +2530,15 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
{
if (hdmi->audio && !IS_ERR(hdmi->audio))
platform_device_unregister(hdmi->audio);
+ if (!IS_ERR(hdmi->cec))
+ platform_device_unregister(hdmi->cec);
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ if (hdmi->cec_notifier)
+ cec_notifier_put(hdmi->cec_notifier);
+
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
@@ -2485,17 +2555,12 @@ int dw_hdmi_probe(struct platform_device *pdev,
const struct dw_hdmi_plat_data *plat_data)
{
struct dw_hdmi *hdmi;
- int ret;
hdmi = __dw_hdmi_probe(pdev, plat_data);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
- ret = drm_bridge_add(&hdmi->bridge);
- if (ret < 0) {
- __dw_hdmi_remove(hdmi);
- return ret;
- }
+ drm_bridge_add(&hdmi->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index c59f87e1483e..9d90eb9c46e5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -478,51 +478,6 @@
#define HDMI_A_PRESETUP 0x501A
#define HDMI_A_SRM_BASE 0x5020
-/* CEC Engine Registers */
-#define HDMI_CEC_CTRL 0x7D00
-#define HDMI_CEC_STAT 0x7D01
-#define HDMI_CEC_MASK 0x7D02
-#define HDMI_CEC_POLARITY 0x7D03
-#define HDMI_CEC_INT 0x7D04
-#define HDMI_CEC_ADDR_L 0x7D05
-#define HDMI_CEC_ADDR_H 0x7D06
-#define HDMI_CEC_TX_CNT 0x7D07
-#define HDMI_CEC_RX_CNT 0x7D08
-#define HDMI_CEC_TX_DATA0 0x7D10
-#define HDMI_CEC_TX_DATA1 0x7D11
-#define HDMI_CEC_TX_DATA2 0x7D12
-#define HDMI_CEC_TX_DATA3 0x7D13
-#define HDMI_CEC_TX_DATA4 0x7D14
-#define HDMI_CEC_TX_DATA5 0x7D15
-#define HDMI_CEC_TX_DATA6 0x7D16
-#define HDMI_CEC_TX_DATA7 0x7D17
-#define HDMI_CEC_TX_DATA8 0x7D18
-#define HDMI_CEC_TX_DATA9 0x7D19
-#define HDMI_CEC_TX_DATA10 0x7D1a
-#define HDMI_CEC_TX_DATA11 0x7D1b
-#define HDMI_CEC_TX_DATA12 0x7D1c
-#define HDMI_CEC_TX_DATA13 0x7D1d
-#define HDMI_CEC_TX_DATA14 0x7D1e
-#define HDMI_CEC_TX_DATA15 0x7D1f
-#define HDMI_CEC_RX_DATA0 0x7D20
-#define HDMI_CEC_RX_DATA1 0x7D21
-#define HDMI_CEC_RX_DATA2 0x7D22
-#define HDMI_CEC_RX_DATA3 0x7D23
-#define HDMI_CEC_RX_DATA4 0x7D24
-#define HDMI_CEC_RX_DATA5 0x7D25
-#define HDMI_CEC_RX_DATA6 0x7D26
-#define HDMI_CEC_RX_DATA7 0x7D27
-#define HDMI_CEC_RX_DATA8 0x7D28
-#define HDMI_CEC_RX_DATA9 0x7D29
-#define HDMI_CEC_RX_DATA10 0x7D2a
-#define HDMI_CEC_RX_DATA11 0x7D2b
-#define HDMI_CEC_RX_DATA12 0x7D2c
-#define HDMI_CEC_RX_DATA13 0x7D2d
-#define HDMI_CEC_RX_DATA14 0x7D2e
-#define HDMI_CEC_RX_DATA15 0x7D2f
-#define HDMI_CEC_LOCK 0x7D30
-#define HDMI_CEC_WKUPCTRL 0x7D31
-
/* I2C Master Registers (E-DDC) */
#define HDMI_I2CM_SLAVE 0x7E00
#define HDMI_I2CM_ADDRESS 0x7E01
@@ -555,6 +510,7 @@ enum {
/* CONFIG0_ID field values */
HDMI_CONFIG0_I2S = 0x10,
+ HDMI_CONFIG0_CEC = 0x02,
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
new file mode 100644
index 000000000000..63c7a01b7053
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Modified by Philippe Cornu <philippe.cornu@st.com>
+ * This generic Synopsys DesignWare MIPI DSI host driver is based on the
+ * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+#define DSI_VERSION 0x00
+#define DSI_PWR_UP 0x04
+#define RESET 0
+#define POWERUP BIT(0)
+
+#define DSI_CLKMGR_CFG 0x08
+#define TO_CLK_DIVISION(div) (((div) & 0xff) << 8)
+#define TX_ESC_CLK_DIVISION(div) (((div) & 0xff) << 0)
+
+#define DSI_DPI_VCID 0x0c
+#define DPI_VID(vid) (((vid) & 0x3) << 0)
+
+#define DSI_DPI_COLOR_CODING 0x10
+#define EN18_LOOSELY BIT(8)
+#define DPI_COLOR_CODING_16BIT_1 0x0
+#define DPI_COLOR_CODING_16BIT_2 0x1
+#define DPI_COLOR_CODING_16BIT_3 0x2
+#define DPI_COLOR_CODING_18BIT_1 0x3
+#define DPI_COLOR_CODING_18BIT_2 0x4
+#define DPI_COLOR_CODING_24BIT 0x5
+
+#define DSI_DPI_CFG_POL 0x14
+#define COLORM_ACTIVE_LOW BIT(4)
+#define SHUTD_ACTIVE_LOW BIT(3)
+#define HSYNC_ACTIVE_LOW BIT(2)
+#define VSYNC_ACTIVE_LOW BIT(1)
+#define DATAEN_ACTIVE_LOW BIT(0)
+
+#define DSI_DPI_LP_CMD_TIM 0x18
+#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
+#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
+
+#define DSI_DBI_CFG 0x20
+#define DSI_DBI_CMDSIZE 0x28
+
+#define DSI_PCKHDL_CFG 0x2c
+#define EN_CRC_RX BIT(4)
+#define EN_ECC_RX BIT(3)
+#define EN_BTA BIT(2)
+#define EN_EOTP_RX BIT(1)
+#define EN_EOTP_TX BIT(0)
+
+#define DSI_MODE_CFG 0x34
+#define ENABLE_VIDEO_MODE 0
+#define ENABLE_CMD_MODE BIT(0)
+
+#define DSI_VID_MODE_CFG 0x38
+#define FRAME_BTA_ACK BIT(14)
+#define ENABLE_LOW_POWER (0x3f << 8)
+#define ENABLE_LOW_POWER_MASK (0x3f << 8)
+#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
+#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
+#define VID_MODE_TYPE_BURST 0x2
+#define VID_MODE_TYPE_MASK 0x3
+
+#define DSI_VID_PKT_SIZE 0x3c
+#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
+#define VID_PKT_MAX_SIZE 0x3fff
+
+#define DSI_VID_HSA_TIME 0x48
+#define DSI_VID_HBP_TIME 0x4c
+#define DSI_VID_HLINE_TIME 0x50
+#define DSI_VID_VSA_LINES 0x54
+#define DSI_VID_VBP_LINES 0x58
+#define DSI_VID_VFP_LINES 0x5c
+#define DSI_VID_VACTIVE_LINES 0x60
+#define DSI_CMD_MODE_CFG 0x68
+#define MAX_RD_PKT_SIZE_LP BIT(24)
+#define DCS_LW_TX_LP BIT(19)
+#define DCS_SR_0P_TX_LP BIT(18)
+#define DCS_SW_1P_TX_LP BIT(17)
+#define DCS_SW_0P_TX_LP BIT(16)
+#define GEN_LW_TX_LP BIT(14)
+#define GEN_SR_2P_TX_LP BIT(13)
+#define GEN_SR_1P_TX_LP BIT(12)
+#define GEN_SR_0P_TX_LP BIT(11)
+#define GEN_SW_2P_TX_LP BIT(10)
+#define GEN_SW_1P_TX_LP BIT(9)
+#define GEN_SW_0P_TX_LP BIT(8)
+#define EN_ACK_RQST BIT(1)
+#define EN_TEAR_FX BIT(0)
+
+#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
+ DCS_LW_TX_LP | \
+ DCS_SR_0P_TX_LP | \
+ DCS_SW_1P_TX_LP | \
+ DCS_SW_0P_TX_LP | \
+ GEN_LW_TX_LP | \
+ GEN_SR_2P_TX_LP | \
+ GEN_SR_1P_TX_LP | \
+ GEN_SR_0P_TX_LP | \
+ GEN_SW_2P_TX_LP | \
+ GEN_SW_1P_TX_LP | \
+ GEN_SW_0P_TX_LP)
+
+#define DSI_GEN_HDR 0x6c
+#define GEN_HDATA(data) (((data) & 0xffff) << 8)
+#define GEN_HDATA_MASK (0xffff << 8)
+#define GEN_HTYPE(type) (((type) & 0xff) << 0)
+#define GEN_HTYPE_MASK 0xff
+
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_R_FULL BIT(5)
+#define GEN_RD_CMD_BUSY BIT(6)
+
+#define DSI_TO_CNT_CFG 0x78
+#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
+#define LPRX_TO_CNT(p) ((p) & 0xffff)
+
+#define DSI_BTA_TO_CNT 0x8c
+#define DSI_LPCLK_CTRL 0x94
+#define AUTO_CLKLANE_CTRL BIT(1)
+#define PHY_TXREQUESTCLKHS BIT(0)
+
+#define DSI_PHY_TMR_LPCLK_CFG 0x98
+#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
+#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
+
+#define DSI_PHY_TMR_CFG 0x9c
+#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
+#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
+#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_DISFORCEPLL 0
+#define PHY_ENFORCEPLL BIT(3)
+#define PHY_DISABLECLK 0
+#define PHY_ENABLECLK BIT(2)
+#define PHY_RSTZ 0
+#define PHY_UNRSTZ BIT(1)
+#define PHY_SHUTDOWNZ 0
+#define PHY_UNSHUTDOWNZ BIT(0)
+
+#define DSI_PHY_IF_CFG 0xa4
+#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
+#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+
+#define DSI_PHY_STATUS 0xb0
+#define LOCK BIT(0)
+#define STOP_STATE_CLK_LANE BIT(2)
+
+#define DSI_PHY_TST_CTRL0 0xb4
+#define PHY_TESTCLK BIT(1)
+#define PHY_UNTESTCLK 0
+#define PHY_TESTCLR BIT(0)
+#define PHY_UNTESTCLR 0
+
+#define DSI_PHY_TST_CTRL1 0xb8
+#define PHY_TESTEN BIT(16)
+#define PHY_UNTESTEN 0
+#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
+#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+
+#define DSI_INT_ST0 0xbc
+#define DSI_INT_ST1 0xc0
+#define DSI_INT_MSK0 0xc4
+#define DSI_INT_MSK1 0xc8
+
+#define PHY_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct dw_mipi_dsi {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ bool is_panel_bridge;
+ struct device *dev;
+ void __iomem *base;
+
+ struct clk *pclk;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 channel;
+ u32 lanes;
+ u32 format;
+ unsigned long mode_flags;
+
+ const struct dw_mipi_dsi_plat_data *plat_data;
+};
+
+/*
+ * The controller should generate 2 frames before
+ * preparing the peripheral.
+ */
+static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
+{
+ int refresh, two_frames;
+
+ refresh = drm_mode_vrefresh(mode);
+ two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
+ msleep(two_frames);
+}
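/*
 * Worked example for the delay above, assuming a 60 Hz mode (the value
 * is illustrative only, not taken from this patch):
 *
 *	refresh    = drm_mode_vrefresh(mode);             -> 60
 *	two_frames = DIV_ROUND_UP(MSEC_PER_SEC, 60) * 2;  -> 17 * 2 = 34
 *
 * i.e. the host keeps emitting video for ~34 ms before the peripheral
 * is prepared.
 */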
+
+static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dw_mipi_dsi, dsi_host);
+}
+
+static inline struct dw_mipi_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_mipi_dsi, bridge);
+}
+
+static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
+{
+ writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
+{
+ return readl(dsi->base + reg);
+}
+
+static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ if (device->lanes > dsi->plat_data->max_data_lanes) {
+ dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
+ device->lanes);
+ return -EINVAL;
+ }
+
+ dsi->lanes = device->lanes;
+ dsi->channel = device->channel;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0,
+ &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ dsi->is_panel_bridge = true;
+ }
+
+ dsi->panel_bridge = bridge;
+
+ drm_bridge_add(&dsi->bridge);
+
+ return 0;
+}
+
+static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+
+ if (dsi->is_panel_bridge)
+ drm_panel_bridge_remove(dsi->panel_bridge);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
+ u32 val = 0;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
+ val |= EN_ACK_RQST;
+ if (lpm)
+ val |= CMD_MODE_ALL_LP;
+
+ dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
+ dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+}
+
+static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ dsi_write(dsi, DSI_GEN_HDR, hdr_val);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx_buf = msg->tx_buf;
+ u16 data = 0;
+ u32 val;
+
+ if (msg->tx_len > 0)
+ data |= tx_buf[0];
+ if (msg->tx_len > 1)
+ data |= tx_buf[1] << 8;
+
+ if (msg->tx_len > 2) {
+ dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
+ val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
+ return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
+}
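/*
 * Illustrative header packing for the short write above, using a
 * hypothetical DCS set_display_on command (MIPI_DCS_SET_DISPLAY_ON):
 *
 *	msg->type = MIPI_DSI_DCS_SHORT_WRITE;      -> 0x05
 *	tx_buf[0] = 0x29;                          -> data = 0x0029
 *	val = GEN_HDATA(0x0029) | GEN_HTYPE(0x05)
 *	    = (0x0029 << 8) | 0x05 = 0x2905
 */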
+
+static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx_buf = msg->tx_buf;
+ int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
+ u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
+ u32 remainder;
+ u32 val;
+
+ if (msg->tx_len < 3) {
+ dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
+ while (DIV_ROUND_UP(len, pld_data_bytes)) {
+ if (len < pld_data_bytes) {
+ remainder = 0;
+ memcpy(&remainder, tx_buf, len);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ len = 0;
+ } else {
+ memcpy(&remainder, tx_buf, pld_data_bytes);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
+}
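/*
 * Illustrative payload packing for the long write above, assuming a
 * little-endian CPU and a hypothetical 6-byte payload:
 *
 *	tx_buf[] = { 0x2a, 0x00, 0x00, 0x01, 0x00, 0x0f };
 *
 *	iteration 1: 4 bytes copied, DSI_GEN_PLD_DATA = 0x0100002a, len 6 -> 2
 *	iteration 2: len < 4, zero-padded remainder   = 0x00000f00, len 2 -> 0
 *
 * the 6-byte length then goes out in the packet header via GEN_HDATA(6).
 */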
+
+static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ int ret;
+
+ /*
+ * TODO dw drv improvements
+ * use mipi_dsi_create_packet() instead of all following
+ * functions and code (no switch cases, no
+ * dw_mipi_dsi_dcs_short_write(), only the loop in long_write...)
+ * and use packet.header...
+ */
+ dw_mipi_message_config(dsi, msg);
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
+ break;
+ default:
+ dev_err(dsi->dev, "unsupported message type 0x%02x\n",
+ msg->type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
+ .attach = dw_mipi_dsi_host_attach,
+ .detach = dw_mipi_dsi_host_detach,
+ .transfer = dw_mipi_dsi_host_transfer,
+};
+
+static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
+{
+ u32 val;
+
+ /*
+ * TODO dw drv improvements
+ * enabling low power is panel-dependent, we should use the
+ * panel configuration here...
+ */
+ val = ENABLE_LOW_POWER;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= VID_MODE_TYPE_BURST;
+ else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
+ else
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+
+ dsi_write(dsi, DSI_VID_MODE_CFG, val);
+}
+
+static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
+ unsigned long mode_flags)
+{
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
+ dw_mipi_dsi_video_mode_config(dsi);
+ dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
+ } else {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+ }
+
+ dsi_write(dsi, DSI_PWR_UP, POWERUP);
+}
+
+static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
+{
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
+}
+
+static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * The maximum permitted escape clock is 20MHz and it is derived from
+ * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
+ *
+ * (lane_mbps >> 3) / esc_clk_division < 20
+ * which is:
+ * (lane_mbps >> 3) / 20 > esc_clk_division
+ */
+ u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
+
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+
+ /*
+ * TODO dw drv improvements
+ * timeout clock division should be computed with the
+ * high speed transmission counter timeout and byte lane...
+ */
+ dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) |
+ TX_ESC_CLK_DIVISION(esc_clk_division));
+}
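/*
 * Worked example for the divider above, assuming 500 Mbps per lane (an
 * illustrative value):
 *
 *	lane_mbps >> 3   = 62            (lanebyteclk ~ 62.5 MHz)
 *	esc_clk_division = 62 / 20 + 1 = 4
 *	escape clock     = 62.5 / 4 ~ 15.6 MHz, below the 20 MHz limit
 */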
+
+static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 val = 0, color = 0;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ color = DPI_COLOR_CODING_24BIT;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color = DPI_COLOR_CODING_18BIT_1;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color = DPI_COLOR_CODING_16BIT_1;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= VSYNC_ACTIVE_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= HSYNC_ACTIVE_LOW;
+
+ dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
+ dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
+ dsi_write(dsi, DSI_DPI_CFG_POL, val);
+ /*
+ * TODO dw drv improvements
+ * largest packet sizes during hfp or during vsa/vbp/vfp
+ * should be computed according to byte lane and lane number, and only
+ * if sending lp cmds in high speed is enabled (PHY_TXREQUESTCLKHS)
+ */
+ dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
+ | INVACT_LPCMD_TIME(4));
+}
+
+static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
+{
+ dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
+}
+
+static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ /*
+ * TODO dw drv improvements
+ * only burst mode is supported here. For non-burst video modes,
+ * we should compute DSI_VID_PKT_SIZE, DSI_VCCR.NUMC &
+ * DSI_VNPCR.NPSIZE... especially because this driver supports
+ * non-burst video modes, see dw_mipi_dsi_video_mode_config()...
+ */
+ dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
+}
+
+static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * compute high speed transmission counter timeout according
+ * to the timeout clock division (TO_CLK_DIVISION) and byte lane...
+ */
+ dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
+ /*
+ * TODO dw drv improvements
+ * the Bus-Turn-Around Timeout Counter should be computed
+ * according to byte lane...
+ */
+ dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+}
+
+/* Get lane byte clock cycles. */
+static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode,
+ u32 hcomponent)
+{
+ u32 frac, lbcc;
+
+ lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
+
+ frac = lbcc % mode->clock;
+ lbcc = lbcc / mode->clock;
+ if (frac)
+ lbcc++;
+
+ return lbcc;
+}
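/*
 * Worked example, assuming a hypothetical mode with htotal = 2200,
 * mode->clock = 148500 (kHz) and lane_mbps = 1000:
 *
 *	lbcc = 2200 * 1000 * 1000 / 8 = 275000000
 *	lbcc / mode->clock            = 1851, remainder != 0
 *	-> rounded up to 1852 lane byte clock cycles for the component
 */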
+
+static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 htotal, hsa, hbp, lbcc;
+
+ htotal = mode->htotal;
+ hsa = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ /*
+ * TODO dw drv improvements
+ * computations below may be improved...
+ */
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal);
+ dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
+
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa);
+ dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
+
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp);
+ dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
+}
+
+static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 vactive, vsa, vfp, vbp;
+
+ vactive = mode->vdisplay;
+ vsa = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
+ dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
+ dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
+ dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
+}
+
+static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * data & clock lane timers should be computed according to panel
+ * blankings and to the automatic clock lane control mode...
+ * note: DSI_PHY_TMR_CFG.MAX_RD_TIME should be in line with
+ * DSI_CMD_MODE_CFG.MAX_RD_PKT_SIZE_LP (see CMD_MODE_ALL_LP)
+ */
+ dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
+ | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+
+ dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
+ | PHY_CLKLP2HS_TIME(0x40));
+}
+
+static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * stop wait time should be the maximum between host dsi
+ * and panel stop wait times
+ */
+ dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
+ N_LANES(dsi->lanes));
+}
+
+static void dw_mipi_dsi_dphy_init(struct dw_mipi_dsi *dsi)
+{
+ /* Clear PHY state */
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
+ | PHY_RSTZ | PHY_SHUTDOWNZ);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
+}
+
+static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
+{
+ u32 val;
+ int ret;
+
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
+ PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
+
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
+ val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
+
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
+ val, val & STOP_STATE_CLK_LANE, 1000,
+ PHY_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
+}
+
+static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
+{
+ dsi_read(dsi, DSI_INT_ST0);
+ dsi_read(dsi, DSI_INT_ST1);
+ dsi_write(dsi, DSI_INT_MSK0, 0);
+ dsi_write(dsi, DSI_INT_MSK1, 0);
+}
+
+static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ /*
+ * Switch to command mode before panel-bridge post_disable &
+ * panel unprepare.
+ * Note: panel-bridge disable & panel disable have been called
+ * before by the drm framework.
+ */
+ dw_mipi_dsi_set_mode(dsi, 0);
+
+ /*
+ * TODO This is the only way found so far to call panel-bridge
+ * post_disable & panel unprepare before the dsi "final" disable...
+ * This needs to be fixed in the drm_bridge framework, and the API
+ * needs to be updated to manage our own call chains...
+ */
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+
+ dw_mipi_dsi_disable(dsi);
+ clk_disable_unprepare(dsi->pclk);
+ pm_runtime_put(dsi->dev);
+}
+
+static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+ void *priv_data = dsi->plat_data->priv_data;
+ int ret;
+
+ clk_prepare_enable(dsi->pclk);
+
+ ret = phy_ops->get_lane_mbps(priv_data, mode, dsi->mode_flags,
+ dsi->lanes, dsi->format, &dsi->lane_mbps);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
+
+ pm_runtime_get_sync(dsi->dev);
+ dw_mipi_dsi_init(dsi);
+ dw_mipi_dsi_dpi_config(dsi, mode);
+ dw_mipi_dsi_packet_handler_config(dsi);
+ dw_mipi_dsi_video_mode_config(dsi);
+ dw_mipi_dsi_video_packet_config(dsi, mode);
+ dw_mipi_dsi_command_mode_config(dsi);
+ dw_mipi_dsi_line_timer_config(dsi, mode);
+ dw_mipi_dsi_vertical_timing_config(dsi, mode);
+
+ dw_mipi_dsi_dphy_init(dsi);
+ dw_mipi_dsi_dphy_timing_config(dsi);
+ dw_mipi_dsi_dphy_interface_config(dsi);
+
+ dw_mipi_dsi_clear_err(dsi);
+
+ ret = phy_ops->init(priv_data);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy init() failed\n");
+
+ dw_mipi_dsi_dphy_enable(dsi);
+
+ dw_mipi_dsi_wait_for_two_frames(mode);
+
+ /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
+ dw_mipi_dsi_set_mode(dsi, 0);
+}
+
+static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ /* Switch to video mode for panel-bridge enable & panel enable */
+ dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
+}
+
+static enum drm_mode_status
+dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
+ enum drm_mode_status mode_status = MODE_OK;
+
+ if (pdata->mode_valid)
+ mode_status = pdata->mode_valid(pdata->priv_data, mode);
+
+ return mode_status;
+}
+
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found\n");
+ return -ENODEV;
+ }
+
+ /* Set the encoder type as caller does not know it */
+ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+}
+
+static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
+ .mode_set = dw_mipi_dsi_bridge_mode_set,
+ .enable = dw_mipi_dsi_bridge_enable,
+ .post_disable = dw_mipi_dsi_bridge_post_disable,
+ .mode_valid = dw_mipi_dsi_bridge_mode_valid,
+ .attach = dw_mipi_dsi_bridge_attach,
+};
+
+static struct dw_mipi_dsi *
+__dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *apb_rst;
+ struct dw_mipi_dsi *dsi;
+ struct resource *res;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return ERR_PTR(-ENOMEM);
+
+ dsi->dev = dev;
+ dsi->plat_data = plat_data;
+
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps) {
+ DRM_ERROR("Phy not properly configured\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!plat_data->base) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ dsi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->base))
+ return ERR_PTR(-ENODEV);
+
+ } else {
+ dsi->base = plat_data->base;
+ }
+
+ dsi->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dsi->pclk)) {
+ ret = PTR_ERR(dsi->pclk);
+ dev_err(dev, "Unable to get pclk: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Note that the reset was not defined in the initial device tree, so
+ * we have to be prepared for it not being found.
+ */
+ apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(apb_rst)) {
+ ret = PTR_ERR(apb_rst);
+ if (ret == -ENOENT) {
+ apb_rst = NULL;
+ } else {
+ dev_err(dev, "Unable to get reset control: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (apb_rst) {
+ ret = clk_prepare_enable(dsi->pclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ reset_control_assert(apb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(apb_rst);
+
+ clk_disable_unprepare(dsi->pclk);
+ }
+
+ pm_runtime_enable(dev);
+
+ dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dsi->bridge.driver_private = dsi;
+ dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
+#ifdef CONFIG_OF
+ dsi->bridge.of_node = pdev->dev.of_node;
+#endif
+
+ dev_set_drvdata(dev, dsi);
+
+ return dsi;
+}
+
+static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
+{
+ pm_runtime_disable(dsi->dev);
+}
+
+/*
+ * Probe/remove API, used from platforms based on the DRM bridge API.
+ */
+int dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct dw_mipi_dsi *dsi;
+
+ dsi = __dw_mipi_dsi_probe(pdev, plat_data);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
+
+void dw_mipi_dsi_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi *dsi = platform_get_drvdata(pdev);
+
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ __dw_mipi_dsi_remove(dsi);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
+
+/*
+ * Bind/unbind API, used from platforms based on the component framework.
+ */
+int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct dw_mipi_dsi *dsi;
+ int ret;
+
+ dsi = __dw_mipi_dsi_probe(pdev, plat_data);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
+
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ if (ret) {
+ dw_mipi_dsi_remove(pdev);
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
+
+void dw_mipi_dsi_unbind(struct device *dev)
+{
+ struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
+
+ __dw_mipi_dsi_remove(dsi);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_DESCRIPTION("DW MIPI DSI host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dw-mipi-dsi");
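/*
 * For context, a minimal sketch of a platform glue driver hooking into the
 * probe API added above; the stm_dsi_* names and phy callbacks are
 * hypothetical, for illustration only:
 */
static const struct dw_mipi_dsi_phy_ops stm_dsi_phy_ops = {
	.init = stm_dsi_phy_init,		/* hypothetical callback */
	.get_lane_mbps = stm_dsi_get_lane_mbps,	/* hypothetical callback */
};

static const struct dw_mipi_dsi_plat_data stm_dsi_plat_data = {
	.max_data_lanes = 4,
	.phy_ops = &stm_dsi_phy_ops,
};

static int stm_dsi_probe(struct platform_device *pdev)
{
	/* ->base left NULL: __dw_mipi_dsi_probe() then ioremaps the
	 * platform device's own first MEM resource */
	return dw_mipi_dsi_probe(pdev, &stm_dsi_plat_data);
}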
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 0529e500c534..8571cfd877c5 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1160,7 +1160,6 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
};
static const struct drm_connector_funcs tc_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -1325,11 +1324,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&tc->bridge);
- if (ret) {
- dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
- goto err_unregister_aux;
- }
+ drm_bridge_add(&tc->bridge);
i2c_set_clientdata(client, tc);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index eee4efda829e..acb857030951 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -102,7 +102,6 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs tfp410_con_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = tfp410_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -237,11 +236,7 @@ static int tfp410_init(struct device *dev)
}
}
- ret = drm_bridge_add(&dvi->bridge);
- if (ret) {
- dev_err(dev, "drm_bridge_add() failed: %d\n", ret);
- goto fail;
- }
+ drm_bridge_add(&dvi->bridge);
return 0;
fail:
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d893ea21a359..69c4e352dd78 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -132,7 +132,6 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
- .set_busid = drm_pci_set_busid,
.fops = &cirrus_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -143,7 +142,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
static const struct dev_pm_ops cirrus_pm_ops = {
@@ -166,12 +164,12 @@ static int __init cirrus_init(void)
if (cirrus_modeset == 0)
return -EINVAL;
- return drm_pci_init(&driver, &cirrus_pci_driver);
+ return pci_register_driver(&cirrus_pci_driver);
}
static void __exit cirrus_exit(void)
{
- drm_pci_exit(&driver, &cirrus_pci_driver);
+ pci_unregister_driver(&cirrus_pci_driver);
}
module_init(cirrus_init);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 8690352d96f7..be2d7e488062 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -96,7 +96,6 @@
struct cirrus_crtc {
struct drm_crtc base;
- u8 lut_r[256], lut_g[256], lut_b[256];
int last_dpms;
bool enabled;
};
@@ -180,13 +179,6 @@ cirrus_bo(struct ttm_buffer_object *bo)
#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
- /* cirrus_mode.c */
-void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-
-
/* cirrus_main.c */
int cirrus_device_init(struct cirrus_device *cdev,
struct drm_device *ddev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 7fa58eeadc9d..32fbfba2c623 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -215,7 +215,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "cirrusdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
@@ -252,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&gfbdev->helper);
if (gfb->obj) {
- drm_gem_object_unreference_unlocked(gfb->obj);
+ drm_gem_object_put_unlocked(gfb->obj);
gfb->obj = NULL;
}
@@ -265,8 +264,6 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
}
static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
- .gamma_set = cirrus_crtc_fb_gamma_set,
- .gamma_get = cirrus_crtc_fb_gamma_get,
.fb_probe = cirrusfb_create,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e7fc95f63dca..b5f528543956 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_gem_object_put_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
if (!cirrus_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(cirrus_fb);
return ERR_PTR(ret);
}
@@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_cirrus_bo(obj);
*offset = cirrus_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 53f6f0f84206..a4c4a465b385 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -31,25 +31,6 @@
* This file contains setup code for the CRTC.
*/
-static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- int i;
-
- if (!crtc->enabled)
- return;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(PALETTE_INDEX, i);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
- }
-}
-
/*
* The DRM core requires DPMS functions, but they make little sense in our
* case and so are just stubs
@@ -330,15 +311,25 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
- for (i = 0; i < size; i++) {
- cirrus_crtc->lut_r[i] = red[i];
- cirrus_crtc->lut_g[i] = green[i];
- cirrus_crtc->lut_b[i] = blue[i];
+ if (!crtc->enabled)
+ return 0;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, *r++ >> 8);
+ WREG8(PALETTE_DATA, *g++ >> 8);
+ WREG8(PALETTE_DATA, *b++ >> 8);
}
- cirrus_crtc_load_lut(crtc);
return 0;
}
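/*
 * For reference (a sketch, not part of the patch): crtc->gamma_store
 * packs the three channels back to back, gamma_size entries each, so
 * the r/g/b pointers above index it as:
 *
 *	u16 *lut = crtc->gamma_store;
 *	u8 r0 = lut[0] >> 8;                     first red entry
 *	u8 g0 = lut[crtc->gamma_size] >> 8;      first green entry
 *	u8 b0 = lut[2 * crtc->gamma_size] >> 8;  first blue entry
 *
 * the >> 8 narrows the 16-bit LUT entries to the 8-bit VGA DAC width.
 */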
@@ -365,7 +356,6 @@ static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
.mode_set_base = cirrus_crtc_mode_set_base,
.prepare = cirrus_crtc_prepare,
.commit = cirrus_crtc_commit,
- .load_lut = cirrus_crtc_load_lut,
};
/* CRTC setup */
@@ -373,7 +363,6 @@ static void cirrus_crtc_init(struct drm_device *dev)
{
struct cirrus_device *cdev = dev->dev_private;
struct cirrus_crtc *cirrus_crtc;
- int i;
cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
(CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
@@ -387,37 +376,9 @@ static void cirrus_crtc_init(struct drm_device *dev)
drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
cdev->mode_info.crtc = cirrus_crtc;
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- cirrus_crtc->lut_r[i] = i;
- cirrus_crtc->lut_g[i] = i;
- cirrus_crtc->lut_b[i] = i;
- }
-
drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
}
-/** Sets the color ramps on behalf of fbcon */
-void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- cirrus_crtc->lut_r[regno] = red;
- cirrus_crtc->lut_g[regno] = green;
- cirrus_crtc->lut_b[regno] = blue;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- *red = cirrus_crtc->lut_r[regno];
- *green = cirrus_crtc->lut_g[regno];
- *blue = cirrus_crtc->lut_b[regno];
-}
-
static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c0f336d23f9c..2fd383d7253a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -29,7 +29,6 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>
@@ -188,12 +187,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
}
for (i = 0; i < state->num_private_objs; i++) {
- void *obj_state = state->private_objs[i].obj_state;
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
- state->private_objs[i].funcs->destroy_state(obj_state);
- state->private_objs[i].obj = NULL;
- state->private_objs[i].obj_state = NULL;
- state->private_objs[i].funcs = NULL;
+ if (!obj)
+ continue;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->private_objs[i].state);
+ state->private_objs[i].ptr = NULL;
+ state->private_objs[i].state = NULL;
}
state->num_private_objs = 0;
@@ -409,34 +411,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
-/**
- * drm_atomic_replace_property_blob - replace a blob property
- * @blob: a pointer to the member blob to be replaced
- * @new_blob: the new blob to replace with
- * @replaced: whether the blob has been replaced
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static void
-drm_atomic_replace_property_blob(struct drm_property_blob **blob,
- struct drm_property_blob *new_blob,
- bool *replaced)
-{
- struct drm_property_blob *old_blob = *blob;
-
- if (old_blob == new_blob)
- return;
-
- drm_property_blob_put(old_blob);
- if (new_blob)
- drm_property_blob_get(new_blob);
- *blob = new_blob;
- *replaced = true;
-
- return;
-}
-
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
struct drm_property_blob **blob,
@@ -457,7 +431,7 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
}
}
- drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ *replaced |= drm_property_replace_blob(blob, new_blob);
drm_property_blob_put(new_blob);
return 0;
@@ -739,7 +713,7 @@ EXPORT_SYMBOL(drm_atomic_get_plane_state);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_atomic_plane_set_property(struct drm_plane *plane,
+static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
@@ -796,7 +770,6 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
return 0;
}
-EXPORT_SYMBOL(drm_atomic_plane_set_property);
/**
* drm_atomic_plane_get_property - get property value from plane state
@@ -991,11 +964,44 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_private_obj_init - initialize private object
+ * @obj: private object
+ * @state: initial private object state
+ * @funcs: pointer to the struct of function pointers that identify the object
+ * type
+ *
+ * Initialize the private object, which can be embedded into any
+ * driver private object that needs its own atomic state.
+ */
+void
+drm_atomic_private_obj_init(struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_init);
+
+/**
+ * drm_atomic_private_obj_fini - finalize private object
+ * @obj: private object
+ *
+ * Finalize the private object.
+ */
+void
+drm_atomic_private_obj_fini(struct drm_private_obj *obj)
+{
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_fini);
+
+/**
* drm_atomic_get_private_obj_state - get private object state
* @state: global atomic state
* @obj: private object to get the state for
- * @funcs: pointer to the struct of function pointers that identify the object
- * type
*
* This function returns the private object state for the given private object,
* allocating the state if needed. It does not grab any locks as the caller is
@@ -1005,18 +1011,18 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
*
* Either the allocated state or the error code encoded into a pointer.
*/
-void *
-drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj,
- const struct drm_private_state_funcs *funcs)
+struct drm_private_state *
+drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
{
int index, num_objs, i;
size_t size;
struct __drm_private_objs_state *arr;
+ struct drm_private_state *obj_state;
for (i = 0; i < state->num_private_objs; i++)
- if (obj == state->private_objs[i].obj &&
- state->private_objs[i].obj_state)
- return state->private_objs[i].obj_state;
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].state;
num_objs = state->num_private_objs + 1;
size = sizeof(*state->private_objs) * num_objs;
@@ -1028,18 +1034,21 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj,
index = state->num_private_objs;
memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
- state->private_objs[index].obj_state = funcs->duplicate_state(state, obj);
- if (!state->private_objs[index].obj_state)
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
return ERR_PTR(-ENOMEM);
- state->private_objs[index].obj = obj;
- state->private_objs[index].funcs = funcs;
+ state->private_objs[index].state = obj_state;
+ state->private_objs[index].old_state = obj->state;
+ state->private_objs[index].new_state = obj_state;
+ state->private_objs[index].ptr = obj;
+
state->num_private_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new private object state %p to %p\n",
- state->private_objs[index].obj_state, state);
+ DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
+ obj, obj_state, state);
- return state->private_objs[index].obj_state;
+ return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
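/*
 * A minimal driver-side sketch of the reworked private object API; all
 * foo_* names are hypothetical, for illustration only:
 */
struct foo_obj_state {
	struct drm_private_state base;	/* must stay the first member */
	unsigned int value;
};

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_obj_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	return state ? &state->base : NULL;
}

static void foo_destroy_state(struct drm_private_obj *obj,
			      struct drm_private_state *state)
{
	kfree(container_of(state, struct foo_obj_state, base));
}

static const struct drm_private_state_funcs foo_state_funcs = {
	.atomic_duplicate_state = foo_duplicate_state,
	.atomic_destroy_state = foo_destroy_state,
};

/* init once at driver load, fetch a writable state in atomic_check: */
drm_atomic_private_obj_init(&foo->obj, &initial_state->base, &foo_state_funcs);
new_state = drm_atomic_get_private_obj_state(atomic_state, &foo->obj);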
@@ -1135,7 +1144,7 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_atomic_connector_set_property(struct drm_connector *connector,
+static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val)
{
@@ -1202,7 +1211,6 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
return 0;
}
-EXPORT_SYMBOL(drm_atomic_connector_set_property);
static void drm_atomic_connector_print_state(struct drm_printer *p,
const struct drm_connector_state *state)
@@ -1580,38 +1588,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
- * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
- * @state: atomic state
- *
- * This function should be used by legacy entry points which don't understand
- * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
- * the slowpath completed.
- */
-void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- int ret;
- bool global = false;
-
- if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
- global = true;
-
- dev->mode_config.acquire_ctx = NULL;
- }
-
-retry:
- drm_modeset_backoff(state->acquire_ctx);
-
- ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
- if (ret)
- goto retry;
-
- if (global)
- dev->mode_config.acquire_ctx = state->acquire_ctx;
-}
-EXPORT_SYMBOL(drm_atomic_legacy_backoff);
-
-/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
*
@@ -1655,6 +1631,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1644,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1854,9 +1833,60 @@ static struct drm_pending_vblank_event *create_vblank_event(
return e;
}
-static int atomic_set_prop(struct drm_atomic_state *state,
- struct drm_mode_object *obj, struct drm_property *prop,
- uint64_t prop_value)
+int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ int mode)
+{
+ struct drm_connector *tmp_connector;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret, old_mode = connector->dpms;
+ bool active = false;
+
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+ connector->dpms = mode;
+
+ crtc = connector->state->crtc;
+ if (!crtc)
+ goto out;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
+ if (new_conn_state->crtc != crtc)
+ continue;
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
+ active = true;
+ break;
+ }
+ }
+
+ crtc_state->active = active;
+ ret = drm_atomic_commit(state);
+out:
+ if (ret != 0)
+ connector->dpms = old_mode;
+ return ret;
+}
+
+int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
{
struct drm_mode_object *ref;
int ret;
@@ -2039,7 +2069,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int i, ret;
+ int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
return 0;
@@ -2100,8 +2130,17 @@ static int prepare_crtc_signaling(struct drm_device *dev,
crtc_state->event->base.fence = fence;
}
+
+ c++;
}
+ /*
+ * If the user asked for a page-flip event but the commit contains no
+ * CRTC to signal it from, the event could never be sent, so reject it
+ */
+ if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
return 0;
}
@@ -2167,10 +2206,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_out_fence_state *fence_state = NULL;
+ struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j, num_fences = 0;
+ unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2250,8 @@ retry:
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
+ fence_state = NULL;
+ num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
@@ -2267,7 +2308,8 @@ retry:
goto out;
}
- ret = atomic_set_prop(state, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, obj, prop,
+ prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 86d3093c6c9b..4e53aae9a1fb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -795,6 +795,9 @@ int drm_atomic_helper_check(struct drm_device *dev,
if (ret)
return ret;
+ if (state->legacy_cursor_update)
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
@@ -918,16 +921,12 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc = new_conn_state->crtc;
if ((!crtc && old_conn_state->crtc) ||
(crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
- struct drm_property *dpms_prop =
- dev->mode_config.dpms_property;
int mode = DRM_MODE_DPMS_OFF;
if (crtc && crtc->state->active)
mode = DRM_MODE_DPMS_ON;
connector->dpms = mode;
- drm_object_property_set_value(&connector->base,
- dpms_prop, mode);
}
}
@@ -1069,12 +1068,13 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
@@ -1090,8 +1090,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
- if (funcs->enable)
- funcs->enable(crtc);
+ if (funcs->atomic_enable)
+ funcs->atomic_enable(crtc, old_crtc_state);
else
funcs->commit(crtc);
}
@@ -1191,9 +1191,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
*
* Helper to, after atomic commit, wait for vblanks on all affected
* crtcs (ie. before cleaning up old framebuffers using
- * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
+ * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
+ *
+ * Drivers using the nonblocking commit tracking support initialized by calling
+ * drm_atomic_helper_setup_commit() should look at
+ * drm_atomic_helper_wait_for_flip_done() as an alternative.
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
@@ -1241,27 +1245,54 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
+ * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for page flips on all affected
+ * CRTCs (ie. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). Compared to
+ * drm_atomic_helper_wait_for_vblanks() this waits for the flips to complete
+ * on all CRTCs, assuming that cursor-only updates are signalling their
+ * completion immediately (or using a different path).
+ *
+ * This requires that drivers use the nonblocking commit tracking support
+ * initialized using drm_atomic_helper_setup_commit().
+ */
+void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *unused;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(old_state, crtc, unused, i) {
+ struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ int ret;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
+
+/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
* @old_state: atomic state object with old state structures
*
* This is the default implementation for the
- * &drm_mode_config_helper_funcs.atomic_commit_tail hook.
+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
+ * that do not support runtime_pm or do not need the CRTC to be
+ * enabled to perform a commit. Otherwise, see
+ * drm_atomic_helper_commit_tail_rpm().
*
* Note that the default ordering of how the various stages are called is to
- * match the legacy modeset helper library closest. One peculiarity of that is
- * that it doesn't mesh well with runtime PM at all.
- *
- * For drivers supporting runtime PM the recommended sequence is instead ::
- *
- * drm_atomic_helper_commit_modeset_disables(dev, old_state);
- *
- * drm_atomic_helper_commit_modeset_enables(dev, old_state);
- *
- * drm_atomic_helper_commit_planes(dev, old_state,
- * DRM_PLANE_COMMIT_ACTIVE_ONLY);
- *
- * for committing the atomic update to hardware. See the kerneldoc entries for
- * these three functions for more details.
+ * match the legacy modeset helper library closest.
*/
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
@@ -1281,6 +1312,35 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
+/**
+ * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
+ * @old_state: new modeset state to be committed
+ *
+ * This is an alternative implementation for the
+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
+ * that support runtime_pm or need the CRTC to be enabled to perform a
+ * commit. Otherwise, one should use the default implementation
+ * drm_atomic_helper_commit_tail().
+ */
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
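/*
 * A hypothetical driver would hook this helper like so (sketch only; the
 * foo_* name is an assumption):
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	...
 *	dev->mode_config.helper_private = &foo_mode_config_helpers;
 */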
+
static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -1311,6 +1371,114 @@ static void commit_work(struct work_struct *work)
}
/**
+ * drm_atomic_helper_async_check - check if state can be committed asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This helper will check if it is possible to commit the state asynchronously.
+ * Async commits are not supposed to swap the states like normal sync commits
+ * but just do in-place changes on the current state.
+ *
+ * It will return 0 if the commit can happen in an asynchronous fashion or an
+ * error code if not. Note that an error just means the state can't be
+ * committed asynchronously; if it fails, the commit should be treated like a
+ * normal synchronous commit.
+ */
+int drm_atomic_helper_async_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ struct drm_plane *__plane, *plane = NULL;
+ struct drm_plane_state *__plane_state, *plane_state = NULL;
+ const struct drm_plane_helper_funcs *funcs;
+ int i, j, n_planes = 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ return -EINVAL;
+ }
+
+ for_each_new_plane_in_state(state, __plane, __plane_state, i) {
+ n_planes++;
+ plane = __plane;
+ plane_state = __plane_state;
+ }
+
+ /* FIXME: we support only single plane updates for now */
+ if (!plane || n_planes != 1)
+ return -EINVAL;
+
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ funcs = plane->helper_private;
+ if (!funcs->atomic_async_update)
+ return -EINVAL;
+
+ if (plane_state->fence)
+ return -EINVAL;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (plane->crtc != crtc)
+ continue;
+
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit,
+ commit_entry);
+ if (!commit) {
+ spin_unlock(&crtc->commit_lock);
+ continue;
+ }
+ spin_unlock(&crtc->commit_lock);
+
+ if (!crtc->state->state)
+ continue;
+
+ for_each_plane_in_state(crtc->state->state, __plane,
+ __plane_state, j) {
+ if (__plane == plane)
+ return -EINVAL;
+ }
+ }
+
+ return funcs->atomic_async_check(plane, plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_async_check);
+
+/**
+ * drm_atomic_helper_async_commit - commit state asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This function commits a state asynchronously, i.e., not vblank
+ * synchronized. It should be used on a state only when
+ * drm_atomic_helper_async_check() succeeds. Async commits are not supposed
+ * to swap the states like normal sync commits, but just do in-place changes
+ * on the current state.
+ */
+void drm_atomic_helper_async_commit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ const struct drm_plane_helper_funcs *funcs;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ funcs = plane->helper_private;
+ funcs->atomic_async_update(plane, plane_state);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_async_commit);
+
+/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
@@ -1334,6 +1502,17 @@ int drm_atomic_helper_commit(struct drm_device *dev,
{
int ret;
+ if (state->async_update) {
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ drm_atomic_helper_async_commit(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ return 0;
+ }
+
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -1346,10 +1525,8 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
- }
+ if (ret)
+ goto err;
}
/*
@@ -1358,7 +1535,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret)
+ goto err;
/*
* Everything below can be run asynchronously without the need to grab
@@ -1387,6 +1566,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
commit_tail(state);
return 0;
+
+err:
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -1680,9 +1863,7 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
/* backend must have consumed any event by now */
WARN_ON(new_crtc_state->event);
- spin_lock(&crtc->commit_lock);
complete_all(&commit->hw_done);
- spin_unlock(&crtc->commit_lock);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
@@ -1711,7 +1892,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
if (WARN_ON(!commit))
continue;
- spin_lock(&crtc->commit_lock);
complete_all(&commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&commit->hw_done));
@@ -1721,8 +1901,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
if (try_wait_for_completion(&commit->flip_done))
goto del_commit;
- spin_unlock(&crtc->commit_lock);
-
/* We must wait for the vblank event to signal our completion
* before releasing our reference, since the vblank work does
* not hold a reference of its own. */
@@ -1732,8 +1910,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
- spin_lock(&crtc->commit_lock);
del_commit:
+ spin_lock(&crtc->commit_lock);
list_del(&commit->commit_entry);
spin_unlock(&crtc->commit_lock);
}
@@ -2069,14 +2247,14 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
* @state: atomic state
- * @stall: stall for proceeding commits
+ * @stall: stall for preceding commits
*
* This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all steps that can fail have been
 * done and succeeded, but before the actual hardware state is committed.
*
* For cleanup and error recovery the current state for all changed objects will
- * be swaped into @state.
+ * be swapped into @state.
*
* With that sequence it fits perfectly into the plane prepare/cleanup sequence:
*
@@ -2095,12 +2273,16 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
* the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
* the current atomic helpers this is almost always the case, since the helpers
* don't pass the right state structures to the callbacks.
+ *
+ * Returns:
+ *
+ * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and
+ * waiting for a preceding commit has been interrupted.
*/
-void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
- int i;
- long ret;
+ int i, ret;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
@@ -2108,8 +2290,8 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
- void *obj, *obj_state;
- const struct drm_private_state_funcs *funcs;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -2123,12 +2305,11 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
if (!commit)
continue;
- ret = wait_for_completion_timeout(&commit->hw_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
- crtc->base.id, crtc->name);
+ ret = wait_for_completion_interruptible(&commit->hw_done);
drm_crtc_commit_put(commit);
+
+ if (ret)
+ return ret;
}
}
@@ -2171,8 +2352,17 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
plane->state = new_plane_state;
}
- __for_each_private_obj(state, obj, obj_state, i, funcs)
- funcs->swap_state(obj, &state->private_objs[i].obj_state);
+ for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->private_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
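/*
 * Since drm_atomic_helper_swap_state() can now fail, drivers with custom
 * commit paths must handle the return value. A hedged sketch of the pattern,
 * with the foo_* name hypothetical:
 */
static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret) {
		/* nothing was swapped yet, release plane resources */
		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}

	/* ... proceed with the hardware commit ... */
	return 0;
}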
@@ -2526,6 +2716,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
+ unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2556,22 +2747,26 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
}
- for_each_connector_in_state(state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
- for_each_plane_in_state(state, plane, plane_state, i) {
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
+ plane_mask |= BIT(drm_plane_index(plane));
+ plane->old_fb = plane->fb;
}
ret = drm_atomic_commit(state);
free:
+ if (plane_mask)
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -2702,11 +2897,16 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_connector_state *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
+ unsigned plane_mask = 0;
+ struct drm_device *dev = state->dev;
+ int ret;
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i)
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ plane_mask |= BIT(drm_plane_index(plane));
state->planes[i].old_state = plane->state;
+ }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -2714,7 +2914,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+ if (plane_mask)
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
@@ -2763,177 +2967,11 @@ out:
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
-/**
- * drm_atomic_helper_crtc_set_property - helper for crtc properties
- * @crtc: DRM crtc
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default crtc set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
-retry:
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
-
-/**
- * drm_atomic_helper_plane_set_property - helper for plane properties
- * @plane: DRM plane
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default plane set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(plane->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
-retry:
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- goto fail;
- }
-
- ret = drm_atomic_plane_set_property(plane, plane_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
-
-/**
- * drm_atomic_helper_connector_set_property - helper for connector properties
- * @connector: DRM connector
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default connector set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *connector_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(connector->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
-retry:
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state)) {
- ret = PTR_ERR(connector_state);
- goto fail;
- }
-
- ret = drm_atomic_connector_set_property(connector, connector_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
-
-static int page_flip_common(
- struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
+static int page_flip_common(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
{
struct drm_plane *plane = crtc->primary;
struct drm_plane_state *plane_state;
@@ -3027,13 +3065,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
-int drm_atomic_helper_page_flip_target(
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- uint32_t target,
- struct drm_modeset_acquire_ctx *ctx)
+int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
@@ -3065,85 +3102,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_connector_dpms() - connector dpms helper implementation
- * @connector: affected connector
- * @mode: DPMS mode
- *
- * This is the main helper function provided by the atomic helper framework for
- * implementing the legacy DPMS connector interface. It computes the new desired
- * &drm_crtc_state.active state for the corresponding CRTC (if the connector is
- * enabled) and updates it.
- *
- * Returns:
- * Returns 0 on success, negative errno numbers on failure.
- */
-int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode)
-{
- struct drm_mode_config *config = &connector->dev->mode_config;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- struct drm_connector *tmp_connector;
- struct drm_connector_list_iter conn_iter;
- int ret;
- bool active = false;
- int old_mode = connector->dpms;
-
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- connector->dpms = mode;
- crtc = connector->state->crtc;
-
- if (!crtc)
- return 0;
-
- state = drm_atomic_state_alloc(connector->dev);
- if (!state)
- return -ENOMEM;
-
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
-retry:
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- drm_connector_list_iter_begin(connector->dev, &conn_iter);
- drm_for_each_connector_iter(tmp_connector, &conn_iter) {
- if (tmp_connector->state->crtc != crtc)
- continue;
-
- if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
- active = true;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- crtc_state->active = active;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
- if (ret != 0)
- connector->dpms = old_mode;
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
-
-/**
* drm_atomic_helper_best_encoder - Helper for
* &drm_connector_helper_funcs.best_encoder callback
* @connector: Connector control structure
@@ -3612,12 +3570,12 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_property_blob *blob = NULL;
struct drm_color_lut *blob_data;
int i, ret = 0;
+ bool replaced;
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
@@ -3648,20 +3606,10 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
}
/* Reset DEGAMMA_LUT and CTM properties. */
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->degamma_lut_property, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->ctm_property, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->gamma_lut_property, blob->base.id);
- if (ret)
- goto fail;
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
+ crtc_state->color_mgmt_changed |= replaced;
ret = drm_atomic_commit(state);
@@ -3671,3 +3619,18 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state and resets inferred values.
+ * This is useful for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
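/*
 * A sketch of the intended subclassing pattern (mirrored by the DP MST
 * conversion further below); the foo_* names are hypothetical, and
 * &struct drm_private_state must be the first member so the memcpy()
 * above and the kmemdup() here line up.
 */
struct foo_private_state {
	struct drm_private_state base;
	int foo_custom;
};

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_private_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}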
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index db6aeec50b82..2e5e089dd912 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -319,7 +319,7 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
- states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY);
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3eda500fc005..fe0982708e95 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -128,6 +128,9 @@ EXPORT_SYMBOL(drm_color_lut_extract);
* optional. The gamma and degamma properties are only attached if
* their size is not 0 and ctm_property is only attached if has_ctm is
* true.
+ *
+ * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
+ * legacy &drm_crtc_funcs.gamma_set callback.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 8072e6e4c62c..ba9f36cef68c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -717,9 +717,9 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
* connector is linked to. Drivers should never set this property directly,
* it is handled by the DRM core by calling the &drm_connector_funcs.dpms
- * callback. Atomic drivers should implement this hook using
- * drm_atomic_helper_connector_dpms(). This is the only property standard
- * connector property that userspace can change.
+ * callback. For atomic drivers the remapping to the "ACTIVE" property is
+ * implemented in the DRM core. This is the only standard connector
+ * property that userspace can change.
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
@@ -1225,7 +1225,6 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
- /* store the property value if successful */
if (!ret)
drm_object_property_set_value(&connector->base, property, value);
return ret;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4afdf7902eda..eab36a460638 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -863,8 +863,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* provided by the driver.
*
* This function is deprecated. New drivers must implement atomic modeset
- * support, for which this function is unsuitable. Instead drivers should use
- * drm_atomic_helper_connector_dpms().
+ * support, where DPMS is handled in the DRM core.
*
* Returns:
* Always returns 0.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index d077c5490041..a43582076b20 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -178,6 +178,13 @@ struct drm_minor;
int drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
+int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ int mode);
+int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 1722d8f21449..f9e26dda56d6 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -136,21 +136,51 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}
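+/* Must be called with crc->lock held. */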
+static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
+{
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+}
+
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entries = NULL;
size_t values_cnt;
- int ret;
+ int ret = 0;
- if (crc->opened)
- return -EBUSY;
+ if (drm_drv_uses_atomic_modeset(crtc->dev)) {
+ ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL);
+ if (ret)
+ return ret;
+
+ if (!crtc->state->active)
+ ret = -EIO;
+ drm_modeset_unlock(&crtc->mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(&crc->lock);
+ if (!crc->opened)
+ crc->opened = true;
+ else
+ ret = -EBUSY;
+ spin_unlock_irq(&crc->lock);
- ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ goto err;
+
if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
ret = -EINVAL;
goto err_disable;
@@ -170,7 +200,6 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
spin_lock_irq(&crc->lock);
crc->entries = entries;
crc->values_cnt = values_cnt;
- crc->opened = true;
/*
* Only return once we got a first frame, so userspace doesn't have to
@@ -182,12 +211,17 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
crc->lock);
spin_unlock_irq(&crc->lock);
- WARN_ON(ret);
+ if (ret)
+ goto err_disable;
return 0;
err_disable:
crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+err:
+ spin_lock_irq(&crc->lock);
+ crtc_crc_cleanup(crc);
+ spin_unlock_irq(&crc->lock);
return ret;
}
@@ -197,17 +231,12 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
struct drm_crtc_crc *crc = &crtc->crc;
size_t values_cnt;
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
spin_lock_irq(&crc->lock);
- kfree(crc->entries);
- crc->entries = NULL;
- crc->head = 0;
- crc->tail = 0;
- crc->values_cnt = 0;
- crc->opened = false;
+ crtc_crc_cleanup(crc);
spin_unlock_irq(&crc->lock);
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
-
return 0;
}
@@ -334,7 +363,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
spin_lock(&crc->lock);
/* Caller may not have noticed yet that userspace has stopped reading */
- if (!crc->opened) {
+ if (!crc->entries) {
spin_unlock(&crc->lock);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 80e62f669321..0ef9011a1856 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -111,7 +111,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
void *data;
int ret;
- data = kmalloc(msg.len, GFP_TEMPORARY);
+ data = kmalloc(msg.len, GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ae5f06895562..41b492f99955 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -31,6 +31,8 @@
#include <drm/drmP.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: dp mst helper
@@ -1342,15 +1344,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
{
- static u8 zero_guid[16];
+ u64 salt;
- if (!memcmp(guid, zero_guid, 16)) {
- u64 salt = get_jiffies_64();
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
- return false;
- }
- return true;
+ if (memchr_inv(guid, 0, 16))
+ return true;
+
+ salt = get_jiffies_64();
+
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+
+ return false;
}
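/*
 * memchr_inv() returns NULL when every byte in the range equals the given
 * value, so only an all-zero GUID takes the salting path above. A small
 * illustration, assuming the <linux/string.h> semantics:
 */
static void foo_guid_check_demo(void)
{
	u8 guid[16] = {};

	WARN_ON(memchr_inv(guid, 0, sizeof(guid)) != NULL); /* all zero */
	guid[3] = 0xaa;
	WARN_ON(memchr_inv(guid, 0, sizeof(guid)) == NULL); /* mismatch found */
}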
#if 0
@@ -2540,8 +2544,8 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
int req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
- if (topology_state == NULL)
- return -ENOMEM;
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
port = drm_dp_get_validated_port_ref(mgr, port);
if (port == NULL)
@@ -2580,8 +2584,8 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *topology_state;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
- if (topology_state == NULL)
- return -ENOMEM;
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
/* We cannot rely on port->vcpi.num_slots to update
* topology_state->avail_slots as the port may not exist if the parent
@@ -3017,41 +3021,32 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
-void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
+static struct drm_private_state *
+drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
- struct drm_dp_mst_topology_mgr *mgr = obj;
- struct drm_dp_mst_topology_state *new_mst_state;
+ struct drm_dp_mst_topology_state *state;
- if (WARN_ON(!mgr->state))
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
return NULL;
- new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
- if (new_mst_state)
- new_mst_state->state = state;
- return new_mst_state;
-}
-
-void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
-{
- struct drm_dp_mst_topology_mgr *mgr = obj;
- struct drm_dp_mst_topology_state **topology_state_ptr;
-
- topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- mgr->state->state = (*topology_state_ptr)->state;
- swap(*topology_state_ptr, mgr->state);
- mgr->state->state = NULL;
+ return &state->base;
}
-void drm_dp_mst_destroy_state(void *obj_state)
+static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
- kfree(obj_state);
+ struct drm_dp_mst_topology_state *mst_state =
+ to_dp_mst_topology_state(state);
+
+ kfree(mst_state);
}
static const struct drm_private_state_funcs mst_state_funcs = {
- .duplicate_state = drm_dp_mst_duplicate_state,
- .swap_state = drm_dp_mst_swap_state,
- .destroy_state = drm_dp_mst_destroy_state,
+ .atomic_duplicate_state = drm_dp_mst_duplicate_state,
+ .atomic_destroy_state = drm_dp_mst_destroy_state,
};
/**
@@ -3075,8 +3070,7 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
struct drm_device *dev = mgr->dev;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- return drm_atomic_get_private_obj_state(state, mgr,
- &mst_state_funcs);
+ return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3096,6 +3090,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
+ struct drm_dp_mst_topology_state *mst_state;
+
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
@@ -3124,14 +3120,18 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
- mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
- if (mgr->state == NULL)
+ mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+ if (mst_state == NULL)
return -ENOMEM;
- mgr->state->mgr = mgr;
+
+ mst_state->mgr = mgr;
/* max. time slots - one slot for MTP header */
- mgr->state->avail_slots = 63;
- mgr->funcs = &mst_state_funcs;
+ mst_state->avail_slots = 63;
+
+ drm_atomic_private_obj_init(&mgr->base,
+ &mst_state->base,
+ &mst_state_funcs);
return 0;
}
@@ -3153,8 +3153,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
- kfree(mgr->state);
- mgr->state = NULL;
+ drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 37b8ad3e30d8..be38ac7050d4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -63,6 +63,15 @@ module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
+/*
+ * If the drm core fails to init for whatever reason,
+ * we should prevent any drivers from registering with it.
+ * It's best to check this at drm_dev_init(), as some drivers
+ * prefer to embed struct drm_device into their own device
+ * structure and call drm_dev_init() themselves.
+ */
+static bool drm_core_init_complete = false;
+
static struct dentry *drm_debugfs_root;
#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
@@ -282,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
if (!minor) {
return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
+ } else if (drm_dev_is_unplugged(minor->dev)) {
drm_dev_unref(minor->dev);
return ERR_PTR(-ENODEV);
}
@@ -355,26 +364,32 @@ void drm_put_dev(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_put_dev);
-void drm_unplug_dev(struct drm_device *dev)
+static void drm_device_set_unplugged(struct drm_device *dev)
{
- /* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_unregister_all(dev);
+ smp_wmb();
+ atomic_set(&dev->unplugged, 1);
+}
- drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+/**
+ * drm_dev_unplug - unplug a DRM device
+ * @dev: DRM device
+ *
+ * This unplugs a hotpluggable DRM device, which makes it inaccessible to
+ * userspace operations. Entry points can use drm_dev_is_unplugged() to
+ * check for this. This
+ * essentially unregisters the device like drm_dev_unregister(), but can be
+ * called while there are still open users of @dev.
+ */
+void drm_dev_unplug(struct drm_device *dev)
+{
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
-
drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
+ if (dev->open_count == 0)
+ drm_dev_unref(dev);
mutex_unlock(&drm_global_mutex);
}
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_unplug);
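/*
 * A hedged sketch of the expected call site: the disconnect handler of a
 * hotpluggable (e.g. USB) driver. The foo_* names are hypothetical.
 */
static void foo_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_dev_unplug(dev);	/* unregister; open files stay usable */
	drm_dev_unref(dev);	/* drop the driver's own reference */
}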
/*
* DRM internal mount
@@ -484,6 +499,11 @@ int drm_dev_init(struct drm_device *dev,
{
int ret;
+ if (!drm_core_init_complete) {
+ DRM_ERROR("DRM core is not initialized\n");
+ return -ENODEV;
+ }
+
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -821,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register);
* drm_dev_register() but does not deallocate the device. The caller must call
* drm_dev_unref() to drop their final reference.
*
+ * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
+ * which can be called while there are still open users of @dev.
+ *
* This should be called first in the device teardown code to make sure
* userspace can't access the device instance any more.
*/
@@ -828,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
- drm_lastclose(dev);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_lastclose(dev);
dev->registered = false;
@@ -966,6 +990,8 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
+ drm_core_init_complete = true;
+
DRM_DEBUG("Initialized\n");
return 0;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 10307cc16d75..39ac15ce4702 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_gem.h>
#include "drm_crtc_internal.h"
@@ -42,9 +43,10 @@
* create dumb buffers suitable for scanout, which can then be used to create
* KMS frame buffers.
*
- * To support dumb objects drivers must implement the &drm_driver.dumb_create,
- * &drm_driver.dumb_destroy and &drm_driver.dumb_map_offset operations. See
- * there for further details.
+ * To support dumb objects drivers must implement the &drm_driver.dumb_create
+ * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
+ * not set and &drm_driver.dumb_map_offset defaults to
+ * drm_gem_dumb_map_offset(). See the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
@@ -108,11 +110,16 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
{
struct drm_mode_map_dumb *args = data;
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
+ if (!dev->driver->dumb_create)
return -ENOSYS;
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+ if (dev->driver->dumb_map_offset)
+ return dev->driver->dumb_map_offset(file_priv, dev,
+ args->handle,
+ &args->offset);
+ else
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
@@ -120,9 +127,12 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
{
struct drm_mode_destroy_dumb *args = data;
- if (!dev->driver->dumb_destroy)
+ if (!dev->driver->dumb_create)
return -ENOSYS;
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ if (dev->driver->dumb_destroy)
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ else
+ return drm_gem_dumb_destroy(file_priv, dev, args->handle);
}
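/*
 * With these fallbacks in place, a GEM driver only needs to provide
 * .dumb_create. A sketch, where foo_dumb_create() is hypothetical and the
 * unset hooks fall back to the drm_gem_* helpers used above:
 */
static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.dumb_create = foo_dumb_create,
	/* .dumb_map_offset and .dumb_destroy deliberately left unset */
};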
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2e55599816aa..6bb6337be920 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1006,6 +1006,221 @@ static const struct drm_display_mode edid_cea_modes[] = {
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 66 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 67 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 68 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 69 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 70 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 71 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 72 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 73 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 74 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 75 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 76 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 77 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 78 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 79 - 1680x720@24Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 80 - 1680x720@25Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+ 2948, 3168, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 81 - 1680x720@30Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+ 2420, 2640, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 82 - 1680x720@50Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 83 - 1680x720@60Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 84 - 1680x720@100Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 85 - 1680x720@120Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 86 - 2560x1080@24Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 87 - 2560x1080@25Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+ 3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 88 - 2560x1080@30Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+ 3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 89 - 2560x1080@50Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 90 - 2560x1080@60Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+ 2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 91 - 2560x1080@100Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+ 2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 92 - 2560x1080@120Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 93 - 3840x2160p@24Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 94 - 3840x2160p@25Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 95 - 3840x2160p@30Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 96 - 3840x2160p@50Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 97 - 3840x2160p@60Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 98 - 4096x2160p@24Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 99 - 4096x2160p@25Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 100 - 4096x2160p@30Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 101 - 4096x2160p@50Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 102 - 4096x2160p@60Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 103 - 3840x2160p@24Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 104 - 3840x2160p@25Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 105 - 3840x2160p@30Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 106 - 3840x2160p@50Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 107 - 3840x2160p@60Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
@@ -2566,7 +2781,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
-#define VIDEO_CAPABILITY_BLOCK 0x07
+#define USE_EXTENDED_TAG 0x07
+#define EXT_VIDEO_CAPABILITY_BLOCK 0x00
+#define EXT_VIDEO_DATA_BLOCK_420 0x0E
+#define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
@@ -2902,6 +3120,15 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
return modes;
}
+static u8 svd_to_vic(u8 svd)
+{
+ /* bits 0-6 carry the VIC, bit 7 is the native mode indicator */
+ if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192))
+ return svd & 127;
+
+ return svd;
+}
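/*
 * Worked examples for the mapping above: SVD 16 is already a plain VIC and
 * is returned unchanged, SVD 144 (VIC 16 with the native bit set) yields
 * 16, and values outside 1-64/129-192 pass through untouched.
 */
static void foo_svd_to_vic_demo(void)
{
	WARN_ON(svd_to_vic(16) != 16);		/* plain VIC */
	WARN_ON(svd_to_vic(144) != 16);		/* native bit stripped */
	WARN_ON(svd_to_vic(128) != 128);	/* reserved value, kept as-is */
}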
+
static struct drm_display_mode *
drm_display_mode_from_vic_index(struct drm_connector *connector,
const u8 *video_db, u8 video_len,
@@ -2915,7 +3142,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
return NULL;
/* CEA modes are numbered 1..127 */
- vic = (video_db[video_index] & 127);
+ vic = svd_to_vic(video_db[video_index]);
if (!drm_valid_cea_vic(vic))
return NULL;
@@ -2928,15 +3155,85 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
return newmode;
}
+/*
+ * do_y420vdb_modes - Parse YCBCR-420-only modes
+ * @connector: connector corresponding to the HDMI sink
+ * @svds: start of the data block of CEA YCBCR 420 VDB
+ * @svds_len: length of the CEA YCBCR 420 VDB
+ *
+ * Parse the CEA-861-F YCBCR 420 Video Data Block (Y420VDB)
+ * which contains modes that can be supported only in the
+ * YCBCR 420 output format.
+ */
+static int do_y420vdb_modes(struct drm_connector *connector,
+ const u8 *svds, u8 svds_len)
+{
+ int modes = 0, i;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ for (i = 0; i < svds_len; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+ struct drm_display_mode *newmode;
+
+ if (!drm_valid_cea_vic(vic))
+ continue;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ if (!newmode)
+ break;
+ bitmap_set(hdmi->y420_vdb_modes, vic, 1);
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ if (modes > 0)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ return modes;
+}
+
+/*
+ * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap
+ * @connector: connector corresponding to the HDMI sink
+ * @svd: CEA short video descriptor of the mode to be added to the map
+ *
+ * Makes an entry for a video mode in the YCBCR 420 bitmap
+ */
+static void
+drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
+{
+ u8 vic = svd_to_vic(svd);
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ if (!drm_valid_cea_vic(vic))
+ return;
+
+ bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
+}
+
static int
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
int i, modes = 0;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < len; i++) {
struct drm_display_mode *mode;
mode = drm_display_mode_from_vic_index(connector, db, len, i);
if (mode) {
+ /*
+ * The YCBCR420 capability map block contains a bitmap that
+ * flags which of the CEA VDB modes can also support YCBCR
+ * 420 sampling output (apart from RGB/YCBCR444 etc.).
+ * For example, if bit 0 of the bitmap is set, the first
+ * mode in the VDB also supports YCBCR420 output.
+ * Add YCBCR420 modes only if the sink is HDMI 2.0 capable.
+ */
+ if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i))
+ drm_add_cmdb_modes(connector, db[i]);
+
drm_mode_probed_add(connector, mode);
modes++;
}
@@ -3218,6 +3515,12 @@ cea_db_payload_len(const u8 *db)
}
static int
+cea_db_extended_tag(const u8 *db)
+{
+ return db[1];
+}
+
+static int
cea_db_tag(const u8 *db)
{
return db[0] >> 5;
@@ -3272,9 +3575,77 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
return oui == HDMI_FORUM_IEEE_OUI;
}
+static bool cea_db_is_y420cmdb(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (!cea_db_payload_len(db))
+ return false;
+
+ if (cea_db_extended_tag(db) != EXT_VIDEO_CAP_BLOCK_Y420CMDB)
+ return false;
+
+ return true;
+}
+
+static bool cea_db_is_y420vdb(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (!cea_db_payload_len(db))
+ return false;
+
+ if (cea_db_extended_tag(db) != EXT_VIDEO_DATA_BLOCK_420)
+ return false;
+
+ return true;
+}
+
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
+ const u8 *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ u8 map_len = cea_db_payload_len(db) - 1;
+ u8 count;
+ u64 map = 0;
+
+ if (map_len == 0) {
+ /* All CEA modes also support YCBCR420 sampling. */
+ hdmi->y420_cmdb_map = U64_MAX;
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ return;
+ }
+
+ /*
+ * This map indicates which of the existing CEA VDB modes
+ * can also support YCBCR420 output. If bit 0 is set, the
+ * first mode in the VDB supports YCBCR420 output too.
+ * Parse and keep this map before parsing the VDB itself,
+ * to avoid going through the same block again and again.
+ *
+ * The spec is not clear about the maximum possible size of
+ * this block, so clamp the bitmap size at 8 bytes. Every
+ * byte can address 8 CEA modes, so the map covers the first
+ * 8 * 8 = 64 SVDs.
+ */
+ if (WARN_ON_ONCE(map_len > 8))
+ map_len = 8;
+
+ for (count = 0; count < map_len; count++)
+ map |= (u64)db[2 + count] << (8 * count);
+
+ if (map)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+ hdmi->y420_cmdb_map = map;
+}
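/*
 * A worked example of the bitmap math above, with hypothetical values:
 * map_len == 1 and db[2] == 0x05 produce map == 0x05, meaning the first
 * and third SVDs of the video data block also support YCBCR420 output.
 */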
+
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -3297,10 +3668,16 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
video = db + 1;
video_len = dbl;
modes += do_cea_modes(connector, video, dbl);
- }
- else if (cea_db_is_hdmi_vsdb(db)) {
+ } else if (cea_db_is_hdmi_vsdb(db)) {
hdmi = db;
hdmi_len = dbl;
+ } else if (cea_db_is_y420vdb(db)) {
+ const u8 *vdb420 = &db[2];
+
+ /* Add 4:2:0(only) modes present in EDID */
+ modes += do_y420vdb_modes(connector,
+ vdb420,
+ dbl - 1);
}
}
}
@@ -3793,8 +4170,10 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
return false;
for_each_cea_db(edid_ext, i, start, end) {
- if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
- cea_db_payload_len(&edid_ext[i]) == 2) {
+ if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
+ cea_db_payload_len(&edid_ext[i]) == 2 &&
+ cea_db_extended_tag(&edid_ext[i]) ==
+ EXT_VIDEO_CAPABILITY_BLOCK) {
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
}
@@ -3823,6 +4202,16 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
+static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
+ const u8 *db)
+{
+ u8 dc_mask;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
+ hdmi->y420_dc_modes |= dc_mask;
+}
+
static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
const u8 *hf_vsdb)
{
@@ -3863,6 +4252,8 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
scdc->scrambling.low_rates = true;
}
}
+
+ drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
@@ -3981,6 +4372,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
drm_parse_hdmi_vsdb_video(connector, db);
if (cea_db_is_hdmi_forum_vsdb(db))
drm_parse_hdmi_forum_vsdb(connector, db);
+ if (cea_db_is_y420cmdb(db))
+ drm_parse_y420cmdb_bitmap(connector, db);
}
}
@@ -4215,6 +4608,13 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
/*
+ * CEA-861-F adds a YCBCR capability map block for HDMI 2.0 sinks.
+ * To avoid parsing the same block multiple times, parse that map
+ * from the sink info before parsing the CEA modes.
+ */
+ drm_add_display_info(connector, edid);
+
+ /*
* EDID spec says modes should be preferred in this order:
* - preferred detailed mode
* - other detailed modes from base block
@@ -4241,8 +4641,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- drm_add_display_info(connector, edid);
-
if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;
@@ -4334,12 +4732,14 @@ EXPORT_SYMBOL(drm_set_preferred_mode);
* data from a DRM display mode
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
+ * @is_hdmi2_sink: Sink is HDMI 2.0 compliant
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ bool is_hdmi2_sink)
{
int err;
@@ -4355,6 +4755,28 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->video_code = drm_match_cea_mode(mode);
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink && frame->video_code > 64)
+ frame->video_code = 0;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check if this mode is present in
+ * the HDMI 1.4b 4K modes.
+ */
+ if (frame->video_code) {
+ u8 vendor_if_vic = drm_match_hdmi_mode(mode);
+ bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
+ frame->video_code = 0;
+ }
+
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 53f9bdf470d7..f2ee88363015 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,27 +18,17 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
-struct drm_fb_cma {
- struct drm_framebuffer fb;
- struct drm_gem_cma_object *obj[4];
-};
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- struct drm_fb_cma *fb;
const struct drm_framebuffer_funcs *fb_funcs;
};
@@ -90,69 +80,19 @@ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
-static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct drm_fb_cma, fb);
-}
-
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i;
-
- for (i = 0; i < 4; i++) {
- if (fb_cma->obj[i])
- drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
- }
-
- drm_framebuffer_cleanup(fb);
- kfree(fb_cma);
+ drm_gem_fb_destroy(fb);
}
EXPORT_SYMBOL(drm_fb_cma_destroy);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
-
- return drm_gem_handle_create(file_priv,
- &fb_cma->obj[0]->base, handle);
+ return drm_gem_fb_create_handle(fb, file_priv, handle);
}
EXPORT_SYMBOL(drm_fb_cma_create_handle);
-static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
-};
-
-static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_cma_object **obj,
- unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
-{
- struct drm_fb_cma *fb_cma;
- int ret;
- int i;
-
- fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
- if (!fb_cma)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
- ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
- if (ret) {
- dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
- kfree(fb_cma);
- return ERR_PTR(ret);
- }
-
- return fb_cma;
-}
-
/**
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs.fb_create
@@ -170,53 +110,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
- struct drm_fb_cma *fb_cma;
- struct drm_gem_cma_object *objs[4];
- struct drm_gem_object *obj;
- int ret;
- int i;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
- for (i = 0; i < info->num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
- unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
- unsigned int min_size;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
- dev_err(dev->dev, "Failed to lookup GEM object\n");
- ret = -ENOENT;
- goto err_gem_object_put;
- }
-
- min_size = (height - 1) * mode_cmd->pitches[i]
- + width * info->cpp[i]
- + mode_cmd->offsets[i];
-
- if (obj->size < min_size) {
- drm_gem_object_put_unlocked(obj);
- ret = -EINVAL;
- goto err_gem_object_put;
- }
- objs[i] = to_drm_gem_cma_obj(obj);
- }
-
- fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb_cma)) {
- ret = PTR_ERR(fb_cma);
- goto err_gem_object_put;
- }
-
- return &fb_cma->fb;
-
-err_gem_object_put:
- for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(&objs[i]->base);
- return ERR_PTR(ret);
+ return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
@@ -233,8 +127,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
- &drm_fb_cma_funcs);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);
@@ -250,12 +143,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_object *gem;
- if (plane >= 4)
+ gem = drm_gem_fb_get_obj(fb, plane);
+ if (!gem)
return NULL;
- return fb_cma->obj[plane];
+ return to_drm_gem_cma_obj(gem);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
@@ -272,13 +166,14 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_cma_object *obj;
dma_addr_t paddr;
- if (plane >= 4)
+ obj = drm_fb_cma_get_gem_obj(fb, plane);
+ if (!obj)
return 0;
- paddr = fb_cma->obj[plane]->paddr + fb->offsets[plane];
+ paddr = obj->paddr + fb->offsets[plane];
paddr += fb->format->cpp[plane] * (state->src_x >> 16);
paddr += fb->pitches[plane] * (state->src_y >> 16);
@@ -302,26 +197,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
int drm_fb_cma_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dma_buf *dma_buf;
- struct dma_fence *fence;
-
- if ((plane->state->fb == state->fb) || !state->fb)
- return 0;
-
- dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
- if (dma_buf) {
- fence = reservation_object_get_excl_rcu(dma_buf->resv);
- drm_atomic_set_fence_for_plane(state, fence);
- }
-
- return 0;
+ return drm_gem_fb_prepare_fb(plane, state);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
@@ -330,7 +212,7 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < fb->format->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- drm_gem_cma_describe(fb_cma->obj[i], m);
+ drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
}
}
@@ -431,7 +313,6 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_device *dev = helper->dev;
struct drm_gem_cma_object *obj;
struct drm_framebuffer *fb;
@@ -446,14 +327,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
sizes->surface_bpp);
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
obj = drm_gem_cma_create(dev, size);
if (IS_ERR(obj))
return -ENOMEM;
@@ -464,15 +338,14 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
goto err_gem_free_object;
}
- fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fbdev_cma->fb)) {
+ fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
+ fbdev_cma->fb_funcs);
+ if (IS_ERR(fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fbdev_cma->fb);
+ ret = PTR_ERR(fb);
goto err_fb_info_destroy;
}
- fb = &fbdev_cma->fb->fb;
helper->fb = fb;
fbi->par = helper;
@@ -500,7 +373,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_cma_destroy:
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ drm_framebuffer_remove(fb);
err_fb_info_destroy:
drm_fb_helper_fini(helper);
err_gem_free_object:
@@ -570,6 +443,11 @@ err_free:
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
/**
* drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
@@ -597,8 +475,8 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
if (fbdev_cma->fb_helper.fbdev)
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
- if (fbdev_cma->fb)
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ if (fbdev_cma->fb_helper.fb)
+ drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -640,7 +518,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
* Calls drm_fb_helper_set_suspend, which is a wrapper around
* fb_set_suspend implemented by fbdev core.
*/
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
{
if (fbdev_cma)
drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
@@ -657,7 +535,7 @@ EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
* fb_set_suspend implemented by fbdev core.
*/
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- int state)
+ bool state)
{
if (fbdev_cma)
drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 574af01d3ce9..1b8f013ffa65 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -106,11 +106,11 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*/
#define drm_fb_helper_for_each_connector(fbh, i__) \
- for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
+ for (({ lockdep_assert_held(&(fbh)->lock); }), \
i__ = 0; i__ < (fbh)->connector_count; i__++)
-int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
+static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_conn;
struct drm_fb_helper_connector **temp;
@@ -119,7 +119,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
if (!drm_fbdev_emulation)
return 0;
- WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ lockdep_assert_held(&fb_helper->lock);
count = fb_helper->connector_count + 1;
@@ -141,8 +141,21 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
drm_connector_get(connector);
fb_conn->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
+
return 0;
}
+
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ int err;
+
+ mutex_lock(&fb_helper->lock);
+ err = __drm_fb_helper_add_one_connector(fb_helper, connector);
+ mutex_unlock(&fb_helper->lock);
+
+ return err;
+}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
/**
@@ -169,11 +182,10 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- ret = drm_fb_helper_add_one_connector(fb_helper, connector);
-
+ ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;
}
@@ -192,14 +204,14 @@ fail:
fb_helper->connector_count = 0;
out:
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
+static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_helper_connector;
int i, j;
@@ -207,9 +219,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
if (!drm_fbdev_emulation)
return 0;
- WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ lockdep_assert_held(&fb_helper->lock);
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (fb_helper->connector_info[i]->connector == connector)
break;
}
@@ -227,23 +239,19 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
-static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
- uint16_t *r_base, *g_base, *b_base;
- int i;
-
- if (helper->funcs->gamma_get == NULL)
- return;
+ int err;
- r_base = crtc->gamma_store;
- g_base = r_base + crtc->gamma_size;
- b_base = g_base + crtc->gamma_size;
+ mutex_lock(&fb_helper->lock);
+ err = __drm_fb_helper_remove_one_connector(fb_helper, connector);
+ mutex_unlock(&fb_helper->lock);
- for (i = 0; i < crtc->gamma_size; i++)
- helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+ return err;
}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
@@ -285,7 +293,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev))
continue;
- drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
@@ -298,20 +305,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_enter);
-/* Find the real fb for a given fb helper CRTC */
-static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc *c;
-
- drm_for_each_crtc(c, dev) {
- if (crtc->base.id == c->base.id)
- return c->primary->fb;
- }
-
- return NULL;
-}
-
/**
* drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave
* @info: fbdev registered by the helper
@@ -328,8 +321,11 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
crtc = mode_set->crtc;
+ if (drm_drv_uses_atomic_modeset(crtc->dev))
+ continue;
+
funcs = crtc->helper_private;
- fb = drm_mode_config_fb(crtc);
+ fb = crtc->primary->fb;
if (!crtc->enabled)
continue;
@@ -342,9 +338,6 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
if (funcs->mode_set_base_atomic == NULL)
continue;
- if (drm_drv_uses_atomic_modeset(crtc->dev))
- continue;
-
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -354,19 +347,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
+static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
unsigned int plane_mask;
+ struct drm_modeset_acquire_ctx ctx;
+
+ drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ctx;
+ }
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ state->acquire_ctx = &ctx;
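+
+	/*
+	 * Locking uses an acquire context: on -EDEADLK every lock is dropped
+	 * in drm_modeset_backoff(), which then blocks until the contended
+	 * lock is free, and the whole commit is retried.
+	 */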
retry:
plane_mask = 0;
drm_for_each_plane(plane, dev) {
@@ -375,7 +373,7 @@ retry:
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
- goto fail;
+ goto out_state;
}
plane_state->rotation = DRM_MODE_ROTATE_0;
@@ -389,7 +387,7 @@ retry:
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
- goto fail;
+ goto out_state;
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -397,23 +395,38 @@ retry:
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
- goto fail;
+ goto out_state;
+
+ /*
+ * __drm_atomic_helper_set_config() sets active when a
+		 * mode is set; unconditionally clear it if we force DPMS off.
+ */
+ if (!active) {
+ struct drm_crtc *crtc = mode_set->crtc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->active = false;
+ }
}
ret = drm_atomic_commit(state);
-fail:
+out_state:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_put(state);
+out_ctx:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
backoff:
drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
+ drm_modeset_backoff(&ctx);
goto retry;
}
@@ -422,8 +435,9 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
- int i;
+ int i, ret = 0;
+ drm_modeset_lock_all(fb_helper->dev);
drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
@@ -437,34 +451,33 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
- int ret;
if (crtc->funcs->cursor_set2) {
ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
if (ret)
- return ret;
+ goto out;
} else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
- return ret;
+ goto out;
}
ret = drm_mode_set_config_internal(mode_set);
if (ret)
- return ret;
+ goto out;
}
+out:
+ drm_modeset_unlock_all(fb_helper->dev);
- return 0;
+ return ret;
}
static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- drm_warn_on_modeset_not_all_locked(dev);
-
if (drm_drv_uses_atomic_modeset(dev))
- return restore_fbdev_mode_atomic(fb_helper);
+ return restore_fbdev_mode_atomic(fb_helper, true);
else
return restore_fbdev_mode_legacy(fb_helper);
}
@@ -482,23 +495,26 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
- struct drm_device *dev = fb_helper->dev;
bool do_delayed;
int ret;
if (!drm_fbdev_emulation)
return -ENODEV;
- drm_modeset_lock_all(dev);
+ if (READ_ONCE(fb_helper->deferred_setup))
+ return 0;
+
+ mutex_lock(&fb_helper->lock);
ret = restore_fbdev_mode(fb_helper);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
fb_helper->delayed_hotplug = false;
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
+
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -517,10 +533,12 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
return false;
drm_for_each_crtc(crtc, dev) {
+ drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->primary->fb)
crtcs_bound++;
if (crtc->primary->fb == fb_helper->fb)
bound++;
+ drm_modeset_unlock(&crtc->mutex);
}
if (bound < crtcs_bound)
@@ -548,11 +566,11 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
- drm_modeset_lock_all(dev);
+ mutex_lock(&helper->lock);
ret = restore_fbdev_mode(helper);
if (ret)
error = true;
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&helper->lock);
}
return error;
}
@@ -581,23 +599,14 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
-static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
- struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, j;
- /*
- * For each CRTC in this fb, turn the connectors on/off.
- */
drm_modeset_lock_all(dev);
- if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
- return;
- }
-
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -615,6 +624,26 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
drm_modeset_unlock_all(dev);
}
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ /*
+ * For each CRTC in this fb, turn the connectors on/off.
+ */
+ mutex_lock(&fb_helper->lock);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ mutex_unlock(&fb_helper->lock);
+ return;
+ }
+
+ if (drm_drv_uses_atomic_modeset(fb_helper->dev))
+ restore_fbdev_mode_atomic(fb_helper, dpms_mode == DRM_MODE_DPMS_ON);
+ else
+ dpms_legacy(fb_helper, dpms_mode);
+ mutex_unlock(&fb_helper->lock);
+}
+
/**
* drm_fb_helper_blank - implementation for &fb_ops.fb_blank
* @blank: desired blanking state
@@ -734,6 +763,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
+ mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
}
@@ -899,6 +929,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&kernel_fb_helper_lock);
+ mutex_destroy(&fb_helper->lock);
drm_fb_helper_crtc_free(fb_helper);
}
@@ -1167,22 +1198,23 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
-static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, u16 regno, struct fb_info *info)
+static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
+ u32 *palette = (u32 *)info->pseudo_palette;
+ int i;
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 *palette;
+ if (cmap->start + cmap->len > 16)
+ return -EINVAL;
+
+ for (i = 0; i < cmap->len; ++i) {
+ u16 red = cmap->red[i];
+ u16 green = cmap->green[i];
+ u16 blue = cmap->blue[i];
u32 value;
- /* place color in psuedopalette */
- if (regno > 16)
- return -EINVAL;
- palette = (u32 *)info->pseudo_palette;
- red >>= (16 - info->var.red.length);
- green >>= (16 - info->var.green.length);
- blue >>= (16 - info->var.blue.length);
+
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
@@ -1192,23 +1224,169 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
mask <<= info->var.transp.offset;
value |= mask;
}
- palette[regno] = value;
- return 0;
+ palette[cmap->start + i] = value;
}
- /*
- * The driver really shouldn't advertise pseudo/directcolor
- * visuals if it can't deal with the palette.
- */
- if (WARN_ON(!fb_helper->funcs->gamma_set ||
- !fb_helper->funcs->gamma_get))
- return -EINVAL;
+ return 0;
+}
+
+static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_crtc *crtc;
+ u16 *r, *g, *b;
+ int i, ret = 0;
- WARN_ON(fb->format->cpp[0] != 1);
+ drm_modeset_lock_all(fb_helper->dev);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
- fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
+		if (cmap->start + cmap->len > crtc->gamma_size) {
+			ret = -EINVAL;
+			goto out;
+		}
- return 0;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
+ memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
+ memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
+
+ ret = crtc->funcs->gamma_set(crtc, r, g, b,
+ crtc->gamma_size, NULL);
+ if (ret)
+			goto out;
+	}
+out:
+	drm_modeset_unlock_all(fb_helper->dev);
+
+ return ret;
+}
+
+static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
+ struct fb_cmap *cmap)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property_blob *gamma_lut;
+ struct drm_color_lut *lut;
+ int size = crtc->gamma_size;
+ int i;
+
+ if (!size || cmap->start + cmap->len > size)
+ return ERR_PTR(-EINVAL);
+
+ gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL);
+ if (IS_ERR(gamma_lut))
+ return gamma_lut;
+
+ lut = (struct drm_color_lut *)gamma_lut->data;
+ if (cmap->start || cmap->len != size) {
+ u16 *r = crtc->gamma_store;
+ u16 *g = r + crtc->gamma_size;
+ u16 *b = g + crtc->gamma_size;
+
+ for (i = 0; i < cmap->start; i++) {
+ lut[i].red = r[i];
+ lut[i].green = g[i];
+ lut[i].blue = b[i];
+ }
+ for (i = cmap->start + cmap->len; i < size; i++) {
+ lut[i].red = r[i];
+ lut[i].green = g[i];
+ lut[i].blue = b[i];
+ }
+ }
+
+ for (i = 0; i < cmap->len; i++) {
+ lut[cmap->start + i].red = cmap->red[i];
+ lut[cmap->start + i].green = cmap->green[i];
+ lut[cmap->start + i].blue = cmap->blue[i];
+ }
+
+ return gamma_lut;
+}
+
+static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_property_blob *gamma_lut = NULL;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ u16 *r, *g, *b;
+ int i, ret = 0;
+ bool replaced;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ctx;
+ }
+
+ state->acquire_ctx = &ctx;
+retry:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ if (!gamma_lut)
+ gamma_lut = setcmap_new_gamma_lut(crtc, cmap);
+ if (IS_ERR(gamma_lut)) {
+ ret = PTR_ERR(gamma_lut);
+ gamma_lut = NULL;
+ goto out_state;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out_state;
+ }
+
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+ NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+ gamma_lut);
+ crtc_state->color_mgmt_changed |= replaced;
+ }
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto out_state;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
+ memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
+ memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
+ }
+
+out_state:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_property_blob_put(gamma_lut);
+ drm_atomic_state_put(state);
+out_ctx:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
}
/**
@@ -1219,52 +1397,29 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- const struct drm_crtc_helper_funcs *crtc_funcs;
- u16 *red, *green, *blue, *transp;
- struct drm_crtc *crtc;
- int i, j, rc = 0;
- int start;
+ int ret;
if (oops_in_progress)
return -EBUSY;
- drm_modeset_lock_all(dev);
+ mutex_lock(&fb_helper->lock);
+
if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- for (j = 0; j < cmap->len; j++) {
- u16 hred, hgreen, hblue, htransp = 0xffff;
-
- hred = *red++;
- hgreen = *green++;
- hblue = *blue++;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR)
+ ret = setcmap_pseudo_palette(cmap, info);
+ else if (drm_drv_uses_atomic_modeset(fb_helper->dev))
+ ret = setcmap_atomic(cmap, info);
+ else
+ ret = setcmap_legacy(cmap, info);
- if (transp)
- htransp = *transp++;
+out:
+ mutex_unlock(&fb_helper->lock);
- rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
- if (rc)
- goto out;
- }
- if (crtc_funcs->load_lut)
- crtc_funcs->load_lut(crtc);
- }
- out:
- drm_modeset_unlock_all(dev);
- return rc;
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
@@ -1281,12 +1436,11 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *mode_set;
struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
if (!drm_fb_helper_is_bound(fb_helper)) {
ret = -EBUSY;
goto unlock;
@@ -1331,7 +1485,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
}
unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
@@ -1463,61 +1617,36 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static int pan_display_atomic(struct fb_var_screeninfo *var,
- struct fb_info *info)
+static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_atomic_state *state;
- struct drm_plane *plane;
- int i, ret;
- unsigned int plane_mask;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ int i;
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-retry:
- plane_mask = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set;
mode_set = &fb_helper->crtc_info[i].mode_set;
- mode_set->x = var->xoffset;
- mode_set->y = var->yoffset;
-
- ret = __drm_atomic_helper_set_config(mode_set, state);
- if (ret != 0)
- goto fail;
-
- plane = mode_set->crtc->primary;
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
+ mode_set->x = x;
+ mode_set->y = y;
}
+}
- ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
+static int pan_display_atomic(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ int ret;
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
+ pan_set(fb_helper, var->xoffset, var->yoffset);
-fail:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
+ ret = restore_fbdev_mode_atomic(fb_helper, true);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+	} else {
+		pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
+	}
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
}
static int pan_display_legacy(struct fb_var_screeninfo *var,
@@ -1528,6 +1657,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
int ret = 0;
int i;
+ drm_modeset_lock_all(fb_helper->dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -1542,6 +1672,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
}
}
}
+ drm_modeset_unlock_all(fb_helper->dev);
return ret;
}
@@ -1561,9 +1692,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
if (oops_in_progress)
return -EBUSY;
- drm_modeset_lock_all(dev);
+ mutex_lock(&fb_helper->lock);
if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
return -EBUSY;
}
@@ -1571,7 +1702,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
ret = pan_display_atomic(var, info);
else
ret = pan_display_legacy(var, info);
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
@@ -1579,8 +1710,7 @@ EXPORT_SYMBOL(drm_fb_helper_pan_display);
/*
* Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback and then registers the fbdev and sets up the panic
- * notifier.
+ * the ->fb_probe callback.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
@@ -1678,13 +1808,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- /*
- * hmm everyone went away - assume VGA cable just fell out
- * and will come back later.
- */
- DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
- sizes.fb_width = sizes.surface_width = 1024;
- sizes.fb_height = sizes.surface_height = 768;
+ DRM_INFO("Cannot find any crtc or sizes\n");
+ return -EAGAIN;
}
/* Handle our overallocation */
@@ -1696,17 +1821,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
- /*
- * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
- * events, but at init time drm_setup_crtcs needs to be called before
- * the fb is allocated (since we need to figure out the desired size of
- * the fb before we can allocate it ...). Hence we need to fix things up
- * here again.
- */
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.num_connectors)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-
return 0;
}
@@ -1768,8 +1882,6 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
switch (fb->format->depth) {
case 8:
@@ -1831,12 +1943,11 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
EXPORT_SYMBOL(drm_fb_helper_fill_var);
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
- uint32_t maxX,
- uint32_t maxY)
+ uint32_t maxX,
+ uint32_t maxY)
{
struct drm_connector *connector;
- int count = 0;
- int i;
+ int i, count = 0;
drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
@@ -2234,11 +2345,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
int i;
DRM_DEBUG_KMS("\n");
- if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
- DRM_DEBUG_KMS("No connectors reported connected with modes\n");
-
/* prevent concurrent modification of connector_count by hotplug */
- lockdep_assert_held(&fb_helper->dev->mode_config.mutex);
+ lockdep_assert_held(&fb_helper->lock);
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
@@ -2253,6 +2361,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
goto out;
}
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
+ DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_enable_connectors(fb_helper, enabled);
if (!(fb_helper->funcs->initial_config &&
@@ -2274,6 +2385,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
}
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
@@ -2285,9 +2397,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
struct drm_connector *connector =
fb_helper->connector_info[i]->connector;
@@ -2301,7 +2413,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
fb_crtc->desired_mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
- modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
@@ -2313,6 +2424,91 @@ out:
kfree(enabled);
}
+/*
+ * This is a continuation of drm_setup_crtcs() that sets up anything related
+ * to the framebuffer. During initialization, drm_setup_crtcs() is called before
+ * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev).
+ * So, any setup that touches those fields needs to be done here instead of in
+ * drm_setup_crtcs().
+ */
+static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->fbdev;
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ drm_fb_helper_for_each_connector(fb_helper, i) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
+ /* use first connected connector for the physical dimensions */
+ if (connector->status == connector_status_connected) {
+ info->var.width = connector->display_info.width_mm;
+ info->var.height = connector->display_info.height_mm;
+ break;
+ }
+ }
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+}
+
+/* Note: Drops fb_helper->lock before returning. */
+static int
+__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
+ int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info;
+ unsigned int width, height;
+ int ret;
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ drm_setup_crtcs(fb_helper, width, height);
+ ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ fb_helper->preferred_bpp = bpp_sel;
+ fb_helper->deferred_setup = true;
+ ret = 0;
+ }
+ mutex_unlock(&fb_helper->lock);
+
+ return ret;
+ }
+ drm_setup_crtcs_fb(fb_helper);
+
+ fb_helper->deferred_setup = false;
+
+ info = fb_helper->fbdev;
+ info->var.pixclock = 0;
+
+	/*
+	 * Need to drop locks to avoid a recursive deadlock in
+	 * register_framebuffer(). This is OK because the only thing left to
+	 * do is register the fbdev emulation instance in
+	 * kernel_fb_helper_list.
+	 */
+ mutex_unlock(&fb_helper->lock);
+
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+
+ mutex_lock(&kernel_fb_helper_lock);
+ if (list_empty(&kernel_fb_helper_list))
+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ mutex_unlock(&kernel_fb_helper_lock);
+
+ return 0;
+}
+
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
@@ -2357,39 +2553,15 @@ out:
*/
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
- struct drm_device *dev = fb_helper->dev;
- struct fb_info *info;
int ret;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
- drm_setup_crtcs(fb_helper,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
- mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ mutex_lock(&fb_helper->lock);
+ ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel);
- info = fb_helper->fbdev;
- info->var.pixclock = 0;
- ret = register_framebuffer(info);
- if (ret < 0)
- return ret;
-
- dev_info(dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
-
- mutex_lock(&kernel_fb_helper_lock);
- if (list_empty(&kernel_fb_helper_list))
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
- mutex_unlock(&kernel_fb_helper_lock);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
@@ -2416,22 +2588,29 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
*/
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
- struct drm_device *dev = fb_helper->dev;
+ int err = 0;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
+ if (fb_helper->deferred_setup) {
+ err = __drm_fb_helper_initial_config_and_unlock(fb_helper,
+ fb_helper->preferred_bpp);
+ return err;
+ }
+
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
+ mutex_unlock(&fb_helper->lock);
+ return err;
}
+
DRM_DEBUG_KMS("\n");
drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height);
-
- mutex_unlock(&dev->mode_config.mutex);
+ drm_setup_crtcs_fb(fb_helper);
+ mutex_unlock(&fb_helper->lock);
drm_fb_helper_set_par(fb_helper->fbdev);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 84f3a242cc39..b3c6e997ccdb 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -75,7 +75,7 @@ DEFINE_MUTEX(drm_global_mutex);
* for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure::
+ * following is an example &file_operations structure::
*
* static const example_drm_fops = {
* .owner = THIS_MODULE,
@@ -92,6 +92,11 @@ DEFINE_MUTEX(drm_global_mutex);
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
* CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
* simpler.
+ *
+ * The driver's &file_operations must be stored in &drm_driver.fops.
+ *
+ * For driver-private IOCTL handling see the more detailed discussion in
+ * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
*/
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
@@ -431,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (!--dev->open_count) {
drm_lastclose(dev);
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index b3ef4f1c2630..af279844d7ce 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -817,7 +817,7 @@ retry:
plane->old_fb = plane->fb;
}
- for_each_connector_in_state(state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8dc11064253d..c55f338e380b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
@@ -255,13 +256,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
-
drm_gem_object_handle_put_unlocked(obj);
return 0;
@@ -311,6 +312,41 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
+ * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
+ * @file: drm file-private structure containing the gem object
+ * @dev: corresponding drm_device
+ * @handle: gem object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This implements the &drm_driver.dumb_map_offset kms driver callback for
+ * drivers which use gem to manage their backing storage.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+out:
+ drm_gem_object_put_unlocked(obj);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
+
+/**
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
* @file: drm file-private structure to remove the dumb handle from
* @dev: corresponding drm_device
@@ -826,13 +862,15 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
return;
dev = obj->dev;
- might_lock(&dev->struct_mutex);
- if (dev->driver->gem_free_object_unlocked)
+ if (dev->driver->gem_free_object_unlocked) {
kref_put(&obj->refcount, drm_gem_object_free);
- else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
+ } else {
+ might_lock(&dev->struct_mutex);
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -928,6 +966,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
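+	/*
+	 * Buffers are also accessed by the device, so when memory encryption
+	 * (e.g. SME) is active the mapping must be decrypted.
+	 */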
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
@@ -964,7 +1003,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bc28e7575254..373e33f22be4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -177,7 +177,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object callback.
+ * &drm_driver.gem_free_object_unlocked callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
@@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-/**
- * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
- * object
- * @file_priv: DRM file-private structure containing the GEM object
- * @drm: DRM device
- * @handle: GEM object handle
- * @offset: return location for the fake mmap offset
- *
- * This function look up an object by its handle and returns the fake mmap
- * offset associated with it. Drivers using the CMA helpers should set this
- * as their &drm_driver.dumb_map_offset callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, u32 handle,
- u64 *offset)
-{
- struct drm_gem_object *gem_obj;
-
- gem_obj = drm_gem_object_lookup(file_priv, handle);
- if (!gem_obj) {
- dev_err(drm->dev, "failed to lookup GEM object\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-
- drm_gem_object_put_unlocked(gem_obj);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
-
const struct vm_operations_struct drm_gem_cma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
struct drm_device *dev = priv->minor->dev;
struct drm_vma_offset_node *node;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
new file mode 100644
index 000000000000..d54a083dc5dd
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -0,0 +1,283 @@
+/*
+ * drm gem framebuffer helper functions
+ *
+ * Copyright (C) 2017 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for drivers that don't subclass
+ * &drm_framebuffer and use &drm_gem_object for their backing storage.
+ *
+ * Drivers that don't need additional framebuffer validation can simply use
+ * drm_gem_fb_create() and everything is wired up automatically. But all
+ * parts can also be used individually.
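+ *
+ * For such a driver the wiring can be as small as (sketch, driver names
+ * hypothetical)::
+ *
+ *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+ *         .fb_create = drm_gem_fb_create,
+ *         .atomic_check = drm_atomic_helper_check,
+ *         .atomic_commit = drm_atomic_helper_commit,
+ *     };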
+ */
+
+/**
+ * drm_gem_fb_get_obj() - Get GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Returns the GEM object for given framebuffer.
+ */
+struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
+
+static struct drm_framebuffer *
+drm_gem_fb_alloc(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, fb, funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
+ ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * drm_gem_fb_destroy - Free GEM backed framebuffer
+ * @fb: DRM framebuffer
+ *
+ * Frees a GEM backed framebuffer with its backing buffer(s) and the structure
+ * itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
+ * callback.
+ */
+void drm_gem_fb_destroy(struct drm_framebuffer *fb)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ drm_gem_object_put_unlocked(fb->obj[i]);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+EXPORT_SYMBOL(drm_gem_fb_destroy);
+
+/**
+ * drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
+ * @fb: DRM framebuffer
+ * @file: drm file
+ * @handle: handle created
+ *
+ * Drivers can use this as their &drm_framebuffer_funcs->create_handle
+ * callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int *handle)
+{
+ return drm_gem_handle_create(file, fb->obj[0], handle);
+}
+EXPORT_SYMBOL(drm_gem_fb_create_handle);
+
+/**
+ * drm_gem_fb_create_with_funcs() - helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
+ * need to change &drm_framebuffer_funcs.
+ * The function does buffer size validation.
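+ *
+ * For instance, a driver that wants a &drm_framebuffer_funcs.dirty callback
+ * could pass (sketch, foo_fb_dirty hypothetical)::
+ *
+ *     static const struct drm_framebuffer_funcs foo_fb_funcs = {
+ *         .destroy = drm_gem_fb_destroy,
+ *         .create_handle = drm_gem_fb_create_handle,
+ *         .dirty = foo_fb_dirty,
+ *     };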
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object *objs[4];
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+ unsigned int min_size;
+
+ objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+ if (!objs[i]) {
+ DRM_DEV_ERROR(dev->dev, "Failed to lookup GEM\n");
+ ret = -ENOENT;
+ goto err_gem_object_put;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * info->cpp[i]
+ + mode_cmd->offsets[i];
+
+ if (objs[i]->size < min_size) {
+ drm_gem_object_put_unlocked(objs[i]);
+ ret = -EINVAL;
+ goto err_gem_object_put;
+ }
+ }
+
+ fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_put;
+ }
+
+ return fb;
+
+err_gem_object_put:
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(objs[i]);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
+
+static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+/**
+ * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function. The function does buffer size
+ * validation. Use drm_gem_fb_create_with_funcs() if you need to set
+ * &drm_framebuffer_funcs.dirty.
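+ *
+ * A driver with, say, a 64 byte pitch alignment requirement could wrap it
+ * like this (sketch, names hypothetical)::
+ *
+ *     static struct drm_framebuffer *
+ *     foo_fb_create(struct drm_device *dev, struct drm_file *file,
+ *                   const struct drm_mode_fb_cmd2 *mode_cmd)
+ *     {
+ *         if (mode_cmd->pitches[0] & 63)
+ *             return ERR_PTR(-EINVAL);
+ *
+ *         return drm_gem_fb_create(dev, file, mode_cmd);
+ *     }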
+ */
+struct drm_framebuffer *
+drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ &drm_gem_fb_funcs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create);
+
+/**
+ * drm_gem_fb_prepare_fb() - Prepare gem framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to plane state for the atomic helper
+ * to wait on.
+ *
+ * There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
+ * GEM based framebuffer drivers whose buffers are always pinned in
+ * memory.
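+ *
+ * Hooking it up is just (sketch, names hypothetical)::
+ *
+ *     static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
+ *         .prepare_fb = drm_gem_fb_prepare_fb,
+ *         .atomic_update = foo_plane_atomic_update,
+ *     };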
+ */
+int drm_gem_fb_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
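+	/* For a partial update, seed the LUT with the current gamma_store. */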
+ dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
+
+/**
+ * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
+ * @dev: DRM device
+ * @sizes: fbdev size description
+ * @pitch_align: optional pitch alignment
+ * @obj: GEM object backing the framebuffer
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function creates a framebuffer for use with fbdev emulation.
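+ *
+ * A &drm_fb_helper_funcs.fb_probe implementation might call it as (sketch,
+ * with obj being the GEM object the driver already allocated for fbdev)::
+ *
+ *     fb = drm_gem_fbdev_fb_create(dev, sizes, 0, obj, funcs);
+ *     if (IS_ERR(fb))
+ *         return PTR_ERR(fb);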
+ *
+ * Returns:
+ * Pointer to a drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fbdev_fb_create(struct drm_device *dev,
+ struct drm_fb_helper_surface_size *sizes,
+ unsigned int pitch_align, struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8);
+ if (pitch_align)
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
+ pitch_align);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
+}
+EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5edc24bd10fa..fbc3f308fa19 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -32,6 +32,7 @@ void drm_lastclose(struct drm_device *dev);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
@@ -56,14 +57,19 @@ int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_vblank.c */
extern unsigned int drm_timestamp_monotonic;
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
+void drm_vblank_cleanup(struct drm_device *dev);
+
+/* IOCTLS */
+int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* drm_irq.c */
/* IOCTLS */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
@@ -161,3 +167,9 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d1f202852028..f8e96e648acf 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -842,7 +842,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
- err = drm_ioctl_kernel(file, drm_wait_vblank, &req, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f1eb326524cf..a9ae6dd2d593 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -143,8 +143,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (dev->driver->set_busid) {
- ret = dev->driver->set_busid(dev, master);
+ if (dev->dev && dev_is_pci(dev->dev)) {
+ ret = drm_pci_set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
@@ -603,9 +603,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -657,6 +657,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -695,7 +701,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* DRM driver private IOCTL must be in the range from DRM_COMMAND_BASE to
* DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
- * up the handlers and set the access rights:
+ * up the handlers and set the access rights::
*
* static const struct drm_ioctl_desc my_driver_ioctls[] = {
* DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
@@ -704,6 +710,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* And then assign this to the &drm_driver.ioctls field in your driver
* structure.
+ *
+ * See the separate chapter on :ref:`file operations<drm_driver_fops>` for how
+ * the driver-specific IOCTLs are wired up.
*/
long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
@@ -713,7 +722,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
struct drm_device *dev = file_priv->minor->dev;
int retcode;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
retcode = drm_ioctl_permit(flags, file_priv);
@@ -762,7 +771,7 @@ long drm_ioctl(struct file *filp,
dev = file_priv->minor->dev;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 1160a579e0dc..4b47226b90d4 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -165,14 +165,14 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
u32 reg;
if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
- dev_err(dev, "modalias failure on %s\n", node->full_name);
+ dev_err(dev, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
- dev_err(dev, "device node %s has no valid reg property: %d\n",
- node->full_name, ret);
+ dev_err(dev, "device node %pOF has no valid reg property: %d\n",
+ node, ret);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index f794089d30ac..61a1c8ea74bc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -169,7 +169,7 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
- return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
+ return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
@@ -180,6 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm *mm = hole_node->mm;
struct rb_node **link, *rb;
struct drm_mm_node *parent;
+ bool leftmost = true;
node->__subtree_last = LAST(node);
@@ -196,9 +197,10 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
rb = &hole_node->rb;
link = &hole_node->rb.rb_right;
+ leftmost = false;
} else {
rb = NULL;
- link = &mm->interval_tree.rb_node;
+ link = &mm->interval_tree.rb_root.rb_node;
}
while (*link) {
@@ -208,14 +210,15 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
parent->__subtree_last = node->__subtree_last;
if (node->start < parent->start)
link = &parent->rb.rb_left;
- else
+ else {
link = &parent->rb.rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb, rb, link);
- rb_insert_augmented(&node->rb,
- &mm->interval_tree,
- &drm_mm_interval_tree_augment);
+ rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
+ &drm_mm_interval_tree_augment);
}
#define RB_INSERT(root, member, expr) do { \
@@ -577,7 +580,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
+ rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
@@ -863,7 +866,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
mm->color_adjust = NULL;
INIT_LIST_HEAD(&mm->hole_stack);
- mm->interval_tree = RB_ROOT;
+ mm->interval_tree = RB_ROOT_CACHED;
mm->holes_size = RB_ROOT;
mm->holes_addr = RB_ROOT;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index d9862259a2a7..74f6ff5df656 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -337,6 +337,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.gamma_lut_size_property = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.modifiers_property = prop;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index da9a9adbcc98..1055533792f3 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -233,6 +233,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
{
int i;
+ WARN_ON(drm_drv_uses_atomic_modeset(property->dev) &&
+ !(property->flags & DRM_MODE_PROP_IMMUTABLE));
+
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
@@ -244,24 +247,7 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the softare state of the given property for the given
- * property. Since there is no driver callback to retrieve the current property
- * value this might be out of sync with the hardware, depending upon the driver
- * and property.
- *
- * Atomic drivers should never call this function directly, the core will read
- * out property values through the various ->atomic_get_property callbacks.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
+int __drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
@@ -284,6 +270,31 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
return -EINVAL;
}
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current
+ * property value, this might be out of sync with the hardware, depending upon
+ * the driver and property.
+ *
+ * Atomic drivers should never call this function directly, the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ WARN_ON(drm_drv_uses_atomic_modeset(property->dev));
+
+ return __drm_object_property_get_value(obj, property, val);
+}
EXPORT_SYMBOL(drm_object_property_get_value);
/* helper for getconnector and getproperties ioctls */
@@ -302,7 +313,7 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
continue;
if (*arg_count_props > count) {
- ret = drm_object_property_get_value(obj, prop, &val);
+ ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
return ret;
@@ -381,6 +392,83 @@ struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
return NULL;
}
+static int set_property_legacy(struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_device *dev = prop->dev;
+ struct drm_mode_object *ref;
+ int ret = -EINVAL;
+
+ if (!drm_property_change_valid_get(prop, prop_value, &ref))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(obj, prop,
+ prop_value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(obj_to_plane(obj),
+ prop, prop_value);
+ break;
+ }
+ drm_property_change_valid_put(prop, ref);
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int set_property_atomic(struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_device *dev = prop->dev;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+ state->acquire_ctx = &ctx;
+retry:
+ if (prop == state->dev->mode_config.dpms_property) {
+ if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = drm_atomic_connector_commit_dpms(state,
+ obj_to_connector(obj),
+ prop_value);
+ } else {
+ ret = drm_atomic_set_property(state, obj, prop, prop_value);
+ if (ret)
+ goto out;
+ ret = drm_atomic_commit(state);
+ }
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -388,18 +476,13 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_mode_object *arg_obj;
struct drm_property *property;
int ret = -EINVAL;
- struct drm_mode_object *ref;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
-
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
- if (!arg_obj) {
- ret = -ENOENT;
- goto out;
- }
+ if (!arg_obj)
+ return -ENOENT;
if (!arg_obj->properties)
goto out_unref;
@@ -408,28 +491,12 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!property)
goto out_unref;
- if (!drm_property_change_valid_get(property, arg->value, &ref))
- goto out_unref;
-
- switch (arg_obj->type) {
- case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(arg_obj, property,
- arg->value);
- break;
- case DRM_MODE_OBJECT_CRTC:
- ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
- break;
- case DRM_MODE_OBJECT_PLANE:
- ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
- property, arg->value);
- break;
- }
-
- drm_property_change_valid_put(property, ref);
+ if (drm_drv_uses_atomic_modeset(property->dev))
+ ret = set_property_atomic(arg_obj, property, arg->value);
+ else
+ ret = set_property_legacy(arg_obj, property, arg->value);
out_unref:
drm_mode_object_put(arg_obj);
-out:
- drm_modeset_unlock_all(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f2493b9b82e6..4a3f68a33844 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -709,8 +709,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%s: got %dx%d display mode from %s\n",
- of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ pr_debug("%pOF: got %dx%d display mode from %s\n",
+ np, vm.hactive, vm.vactive, np->name);
drm_mode_debug_printmodeline(dmode);
return 0;
@@ -1083,6 +1083,34 @@ drm_mode_validate_size(const struct drm_display_mode *mode,
}
EXPORT_SYMBOL(drm_mode_validate_size);
+/**
+ * drm_mode_validate_ycbcr420 - filter out 'ycbcr420-only' modes when not allowed
+ * @mode: mode to check
+ * @connector: drm connector under action
+ *
+ * This function is a helper which can be used to filter out any YCBCR420-only
+ * mode when the source doesn't support it.
+ *
+ * Returns:
+ * The mode status
+ */
+enum drm_mode_status
+drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
+ struct drm_connector *connector)
+{
+ u8 vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status = MODE_OK;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ if (test_bit(vic, hdmi->y420_vdb_modes)) {
+ if (!connector->ycbcr_420_allowed)
+ status = MODE_NO_420;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(drm_mode_validate_ycbcr420);
+
#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
static const char * const drm_mode_status_names[] = {
@@ -1122,6 +1150,7 @@ static const char * const drm_mode_status_names[] = {
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
+ MODE_STATUS(NO_420),
MODE_STATUS(STALE),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
@@ -1576,3 +1605,61 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
out:
return ret;
}
+
+/**
+ * drm_mode_is_420_only - if a given videomode can only be supported in YCBCR420
+ * output format
+ *
+ * @display: display under action
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420_only(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ u8 vic = drm_match_cea_mode(mode);
+
+ return test_bit(vic, display->hdmi.y420_vdb_modes);
+}
+EXPORT_SYMBOL(drm_mode_is_420_only);
+
+/**
+ * drm_mode_is_420_also - if a given videomode can be supported in YCBCR420
+ * output format also (along with RGB/YCBCR444/422)
+ *
+ * @display: display under action.
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can also be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420_also(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ u8 vic = drm_match_cea_mode(mode);
+
+ return test_bit(vic, display->hdmi.y420_cmdb_modes);
+}
+EXPORT_SYMBOL(drm_mode_is_420_also);
+
+/**
+ * drm_mode_is_420 - if a given videomode can be supported in YCBCR420
+ * output format
+ *
+ * @display: display under action.
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ return drm_mode_is_420_only(display, mode) ||
+ drm_mode_is_420_also(display, mode);
+}
+EXPORT_SYMBOL(drm_mode_is_420);
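Taken together these helpers let a connector's mode_valid hook handle sinks with 4:2:0-only modes; a hedged sketch (the core now also does this centrally via drm_mode_validate_ycbcr420(), see the drm_probe_helper.c hunk below):

    static enum drm_mode_status
    my_connector_mode_valid(struct drm_connector *connector,
                            struct drm_display_mode *mode)
    {
            /* reject modes that only work in YCBCR420 if the source can't do it */
            if (drm_mode_is_420_only(&connector->display_info, mode) &&
                !connector->ycbcr_420_allowed)
                    return MODE_NO_420;

            return MODE_OK;
    }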
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 2b33825f2f93..9cb1eede0b4d 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -124,6 +124,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
&drm_primary_helper_funcs,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 64ef09a6cccb..af4e906c630d 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -52,7 +52,12 @@
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*
- * On top of of these per-object locks using &ww_mutex there's also an overall
+ * If all that is needed is a single modeset lock, then the &struct
+ * drm_modeset_acquire_ctx is not needed and the locking can be simplified
+ * by passing NULL as the ctx argument to drm_modeset_lock()
+ * and, when done, by calling drm_modeset_unlock().
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
* probe state of connectors, and preventing hotplug add/removal of connectors.
*
@@ -313,11 +318,14 @@ EXPORT_SYMBOL(drm_modeset_lock_init);
* @lock: lock to take
* @ctx: acquire ctx
*
- * If ctx is not NULL, then its ww acquire context is used and the
+ * If @ctx is not NULL, then its ww acquire context is used and the
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
+ *
+ * If @ctx is NULL then the function call behaves like a normal,
+ * non-nesting mutex_lock() call.
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
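As a companion to the simplified single-lock note in the DOC comment above, a hedged sketch of the NULL-ctx pattern for some struct drm_crtc *crtc:

    /* behaves like a plain mutex_lock()/mutex_unlock() pair */
    drm_modeset_lock(&crtc->mutex, NULL);
    /* ... inspect or update state protected by crtc->mutex ... */
    drm_modeset_unlock(&crtc->mutex);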
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 2120f33bdf4a..8dafbdfcd2ea 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -160,8 +160,8 @@ int drm_of_component_probe(struct device *dev,
of_node_put(remote);
continue;
} else if (!of_device_is_available(remote->parent)) {
- dev_warn(dev, "parent device of %s is not available\n",
- remote->full_name);
+ dev_warn(dev, "parent device of %pOF is not available\n",
+ remote);
of_node_put(remote);
continue;
}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1eb4fc3eee20..1235c9877d6f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -149,7 +149,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
-EXPORT_SYMBOL(drm_pci_set_busid);
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
@@ -281,20 +280,15 @@ err_free:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * drm_pci_init - Register matching PCI devices with the DRM subsystem
+ * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
- * Initializes a drm_device structures, registering the stubs and initializing
- * the AGP device.
- *
- * NOTE: This function is deprecated. Modern modesetting drm drivers should use
- * pci_register_driver() directly, this function only provides shadow-binding
- * support for old legacy drivers on top of that core pci function.
+ * This is deprecated and only used by legacy dri1 drivers.
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
@@ -302,8 +296,8 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- if (!(driver->driver_features & DRIVER_LEGACY))
- return pci_register_driver(pdriver);
+ if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
+ return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
INIT_LIST_HEAD(&driver->legacy_dev_list);
@@ -330,6 +324,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
return 0;
}
+EXPORT_SYMBOL(drm_legacy_pci_init);
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
@@ -391,11 +386,6 @@ EXPORT_SYMBOL(drm_pcie_get_max_link_width);
#else
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
void drm_pci_agp_destroy(struct drm_device *dev) {}
int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -405,27 +395,21 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
}
#endif
-EXPORT_SYMBOL(drm_pci_init);
-
/**
- * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
+ * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
- * Unregisters one or more devices matched by a PCI driver from the DRM
- * subsystem.
- *
- * NOTE: This function is deprecated. Modern modesetting drm drivers should use
- * pci_unregister_driver() directly, this function only provides shadow-binding
- * support for old legacy drivers on top of that core pci function.
+ * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
+ * is deprecated and only used by dri1 drivers.
*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (!(driver->driver_features & DRIVER_LEGACY)) {
- pci_unregister_driver(pdriver);
+ WARN_ON(1);
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
@@ -435,4 +419,4 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
}
DRM_INFO("Module unloaded\n");
}
-EXPORT_SYMBOL(drm_pci_exit);
+EXPORT_SYMBOL(drm_legacy_pci_exit);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5dc8c4350602..7a00351d5b5d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -62,6 +62,87 @@ static unsigned int drm_num_planes(struct drm_device *dev)
return num;
}
+static inline u32 *
+formats_ptr(struct drm_format_modifier_blob *blob)
+{
+ return (u32 *)(((char *)blob) + blob->formats_offset);
+}
+
+static inline struct drm_format_modifier *
+modifiers_ptr(struct drm_format_modifier_blob *blob)
+{
+ return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
+}
+
+static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+{
+ const struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
+ struct drm_format_modifier *mod;
+ size_t blob_size, formats_size, modifiers_size;
+ struct drm_format_modifier_blob *blob_data;
+ unsigned int i, j;
+
+ formats_size = sizeof(__u32) * plane->format_count;
+ if (WARN_ON(!formats_size)) {
+ /* 0 formats are never expected */
+ return 0;
+ }
+
+ modifiers_size =
+ sizeof(struct drm_format_modifier) * plane->modifier_count;
+
+ blob_size = sizeof(struct drm_format_modifier_blob);
+ /* Modifiers offset is a pointer to a struct with a 64 bit field so it
+ * should be naturally aligned to 8B.
+ */
+ BUILD_BUG_ON(sizeof(struct drm_format_modifier_blob) % 8);
+ blob_size += ALIGN(formats_size, 8);
+ blob_size += modifiers_size;
+
+ blob = drm_property_create_blob(dev, blob_size, NULL);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ blob_data = (struct drm_format_modifier_blob *)blob->data;
+ blob_data->version = FORMAT_BLOB_CURRENT;
+ blob_data->count_formats = plane->format_count;
+ blob_data->formats_offset = sizeof(struct drm_format_modifier_blob);
+ blob_data->count_modifiers = plane->modifier_count;
+
+ blob_data->modifiers_offset =
+ ALIGN(blob_data->formats_offset + formats_size, 8);
+
+ memcpy(formats_ptr(blob_data), plane->format_types, formats_size);
+
+ /* If we can't determine support, just bail */
+ if (!plane->funcs->format_mod_supported)
+ goto done;
+
+ mod = modifiers_ptr(blob_data);
+ for (i = 0; i < plane->modifier_count; i++) {
+ for (j = 0; j < plane->format_count; j++) {
+ if (plane->funcs->format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
+
+ mod->formats |= 1ULL << j;
+ }
+ }
+
+ mod->modifier = plane->modifiers[i];
+ mod->offset = 0;
+ mod->pad = 0;
+ mod++;
+ }
+
+done:
+ drm_object_attach_property(&plane->base, config->modifiers_property,
+ blob->base.id);
+
+ return 0;
+}
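Consumers of the IN_FORMATS blob walk it with the same offsets. A hedged sketch of a lookup over the blob layout built above (note that create_in_format_blob() always writes offset = 0, which this sketch assumes):

    static bool in_formats_supports(struct drm_format_modifier_blob *blob,
                                    u32 format, u64 modifier)
    {
            u32 *formats = formats_ptr(blob);
            struct drm_format_modifier *mods = modifiers_ptr(blob);
            unsigned int i, j;

            for (i = 0; i < blob->count_modifiers; i++) {
                    if (mods[i].modifier != modifier)
                            continue;
                    for (j = 0; j < blob->count_formats; j++)
                            if (formats[j] == format)
                                    return mods[i].formats & (1ULL << j);
            }

            return false;
    }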
+
/**
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
@@ -70,6 +151,8 @@ static unsigned int drm_num_planes(struct drm_device *dev)
* @funcs: callbacks for the new plane
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
+ * @format_modifiers: array of u64 format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
* @type: type of plane (overlay, primary, cursor)
* @name: printf style format string for the plane name, or NULL for default name
*
@@ -82,10 +165,12 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
+ unsigned int format_modifier_count = 0;
int ret;
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
@@ -105,6 +190,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return -ENOMEM;
}
+ /*
+ * First driver to need more than 64 formats needs to fix this. Each
+ * format is encoded as a bit and the current code only supports a u64.
+ */
+ if (WARN_ON(format_count > 64))
+ return -EINVAL;
+
+ if (format_modifiers) {
+ const uint64_t *temp_modifiers = format_modifiers;
+ while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
+ format_modifier_count++;
+ }
+
+ plane->modifier_count = format_modifier_count;
+ plane->modifiers = kmalloc_array(format_modifier_count,
+ sizeof(format_modifiers[0]),
+ GFP_KERNEL);
+
+ if (format_modifier_count && !plane->modifiers) {
+ DRM_DEBUG_KMS("out of memory when allocating plane modifiers\n");
+ kfree(plane->format_types);
+ drm_mode_object_unregister(dev, &plane->base);
+ return -ENOMEM;
+ }
+
if (name) {
va_list ap;
@@ -117,12 +227,15 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (!plane->name) {
kfree(plane->format_types);
+ kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
+ memcpy(plane->modifiers, format_modifiers,
+ format_modifier_count * sizeof(format_modifiers[0]));
plane->possible_crtcs = possible_crtcs;
plane->type = type;
@@ -149,6 +262,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
+ if (config->allow_fb_modifiers)
+ create_in_format_blob(dev, plane);
+
return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
@@ -205,7 +321,8 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count, type, NULL);
+ formats, format_count,
+ NULL, type, NULL);
}
EXPORT_SYMBOL(drm_plane_init);
@@ -224,6 +341,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
drm_modeset_lock_fini(&plane->mutex);
kfree(plane->format_types);
+ kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
BUG_ON(list_empty(&plane->head));
@@ -601,6 +719,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
+ drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 00e6832a8c1a..904966cde32b 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -528,6 +528,10 @@ retry:
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_pipeline(mode,
connector);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_ycbcr420(mode,
+ connector);
}
prune:
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 3e88fa24eab3..bc5128203056 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -709,6 +709,29 @@ err_created:
}
EXPORT_SYMBOL(drm_property_replace_global_blob);
+/**
+ * drm_property_replace_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ *
+ * Return: true if the blob was in fact replaced.
+ */
+bool drm_property_replace_blob(struct drm_property_blob **blob,
+ struct drm_property_blob *new_blob)
+{
+ struct drm_property_blob *old_blob = *blob;
+
+ if (old_blob == new_blob)
+ return false;
+
+ drm_property_blob_put(old_blob);
+ if (new_blob)
+ drm_property_blob_get(new_blob);
+ *blob = new_blob;
+ return true;
+}
+EXPORT_SYMBOL(drm_property_replace_blob);
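Typical use is in a driver's ->atomic_set_property hook, where a state blob gets swapped and a changed flag is only set when something actually changed; a hedged sketch with hypothetical my_state and gamma_blob members:

    struct drm_property_blob *new_blob =
            drm_property_lookup_blob(dev, blob_id);     /* may be NULL */
    bool replaced;

    replaced = drm_property_replace_blob(&my_state->gamma_blob, new_blob);
    drm_property_blob_put(new_blob);    /* replace_blob holds its own ref */
    my_state->color_mgmt_changed |= replaced;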
+
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 3cd96a95736d..935653eb3616 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -102,7 +102,7 @@ ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
void *data;
int err;
- data = kmalloc(1 + size, GFP_TEMPORARY);
+ data = kmalloc(1 + size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -194,19 +194,26 @@ EXPORT_SYMBOL(drm_scdc_set_scrambling);
* @adapter: I2C adapter for DDC channel
 * @set: set or reset the high clock ratio
*
- * TMDS clock ratio calculations go like this:
- * TMDS character = 10 bit TMDS encoded value
- * TMDS character rate = The rate at which TMDS characters are transmitted(Mcsc)
- * TMDS bit rate = 10x TMDS character rate
- * As per the spec:
- * TMDS clock rate for pixel clock < 340 MHz = 1x the character rate
- * = 1/10 pixel clock rate
- * TMDS clock rate for pixel clock > 340 MHz = 0.25x the character rate
- * = 1/40 pixel clock rate
- *
- * Writes to the TMDS config register over SCDC channel, and:
- * sets TMDS clock ratio to 1/40 when set = 1
- * sets TMDS clock ratio to 1/10 when set = 0
+ *
+ * TMDS clock ratio calculations go like this:
+ * TMDS character = 10 bit TMDS encoded value
+ *
+ * TMDS character rate = The rate at which TMDS characters are
+ * transmitted (Mcsc)
+ *
+ * TMDS bit rate = 10x TMDS character rate
+ *
+ * As per the spec:
+ * TMDS clock rate for pixel clock < 340 MHz = 1x the character
+ * rate = 1/10 pixel clock rate
+ *
+ * TMDS clock rate for pixel clock > 340 MHz = 0.25x the character
+ * rate = 1/40 pixel clock rate
+ *
+ * Writes to the TMDS config register over SCDC channel, and:
+ * sets TMDS clock ratio to 1/40 when set = 1
+ *
+ * sets TMDS clock ratio to 1/10 when set = 0
*
* Returns:
* True if write is successful, false otherwise.
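In practice an HDMI 2.0 encoder derives the ratio from the pixel clock; a hedged sketch of the decision (mode->clock is in kHz, and ddc is assumed to be the connector's DDC i2c_adapter):

    bool high = mode->clock > 340000;   /* TMDS character rate above 340 Mcsc */

    drm_scdc_set_high_tmds_clock_ratio(ddc, high);  /* 1/40 vs. 1/10 ratio */
    drm_scdc_set_scrambling(ddc, high);             /* scrambling required above 340 MHz */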
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index e084f9f8ca66..dc9fd109de14 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -37,10 +37,18 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ bool has_primary = state->plane_mask &
+ BIT(drm_plane_index(crtc->primary));
+
+ /* We always want to have an active plane with an active CRTC */
+ if (has_primary != state->enable)
+ return -EINVAL;
+
return drm_atomic_add_affected_planes(state->state, crtc);
}
-static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
+static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_simple_display_pipe *pipe;
@@ -51,7 +59,8 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
pipe->funcs->enable(pipe, crtc->state);
}
-static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
+static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_simple_display_pipe *pipe;
@@ -64,8 +73,8 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
.atomic_check = drm_simple_kms_crtc_check,
- .disable = drm_simple_kms_crtc_disable,
- .enable = drm_simple_kms_crtc_enable,
+ .atomic_enable = drm_simple_kms_crtc_enable,
+ .atomic_disable = drm_simple_kms_crtc_disable,
};
static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
@@ -88,9 +97,6 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
&pipe->crtc);
- if (crtc_state->enable != !!plane_state->crtc)
- return -EINVAL; /* plane must match crtc enable state */
-
if (!crtc_state->enable)
return 0; /* nothing to check when disabling or disabled */
@@ -193,6 +199,7 @@ EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
* @funcs: callbacks for the display pipe (optional)
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
+ * @format_modifiers: array of format modifiers
* @connector: connector to attach and register (optional)
*
 * Sets up a display pipeline which consists of a really simple
@@ -213,6 +220,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &pipe->encoder;
@@ -227,6 +235,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0,
&drm_simple_kms_plane_funcs,
formats, format_count,
+ format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
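Callers that don't (yet) advertise modifiers simply pass NULL for the new argument; a minimal hedged sketch of an updated init call, with my_pipe_funcs, my_formats and priv as driver placeholders:

    ret = drm_simple_display_pipe_init(drm, &priv->pipe, &my_pipe_funcs,
                                       my_formats, ARRAY_SIZE(my_formats),
                                       NULL /* no format modifiers */,
                                       &priv->connector);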
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 789ba0b37f7b..0422b8c2c2e7 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1,5 +1,7 @@
/*
* Copyright 2017 Red Hat
+ * Parts ported from amdgpu (fence wait code).
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
* that contain an optional fence. The fence can be updated with a new
* fence, or be NULL.
*
+ * syncobjs can be waited upon, where the wait blocks until the underlying
+ * fence signals.
+ *
* syncobj's can be export to fd's and back, these fd's are opaque and
* have no other use case, except passing the syncobj between processes.
*
@@ -46,6 +51,7 @@
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
+#include <linux/sched/signal.h>
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
@@ -75,6 +81,75 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
+static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ cb->func = func;
+ list_add_tail(&cb->node, &syncobj->cb_list);
+}
+
+static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
+ struct dma_fence **fence,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ int ret;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
+
+ spin_lock(&syncobj->lock);
+ /* We've already tried once to get a fence and failed. Now that we
+ * have the lock, try one more time just to be sure we don't add a
+ * callback when a fence has already been set.
+ */
+ if (syncobj->fence) {
+ *fence = dma_fence_get(syncobj->fence);
+ ret = 1;
+ } else {
+ *fence = NULL;
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ ret = 0;
+ }
+ spin_unlock(&syncobj->lock);
+
+ return ret;
+}
+
+/**
+ * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
+ * @syncobj: Sync object to which to add the callback
+ * @cb: Callback to add
+ * @func: Func to use when initializing the drm_syncobj_cb struct
+ *
+ * This adds a callback to be called next time the fence is replaced
+ */
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ spin_lock(&syncobj->lock);
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_add_callback);
+
+/**
+ * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
+ * @syncobj: Sync object from which to remove the callback
+ * @cb: Callback to remove
+ */
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ spin_lock(&syncobj->lock);
+ list_del_init(&cb->node);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_remove_callback);
+
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
@@ -86,18 +161,75 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
+ struct drm_syncobj_cb *cur, *tmp;
if (fence)
dma_fence_get(fence);
- old_fence = xchg(&syncobj->fence, fence);
+
+ spin_lock(&syncobj->lock);
+
+ old_fence = syncobj->fence;
+ syncobj->fence = fence;
+
+ if (fence != old_fence) {
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+ list_del_init(&cur->node);
+ cur->func(syncobj, cur);
+ }
+ }
+
+ spin_unlock(&syncobj->lock);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-int drm_syncobj_fence_get(struct drm_file *file_private,
- u32 handle,
- struct dma_fence **fence)
+struct drm_syncobj_null_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
+{
+ return "syncobjnull";
+}
+
+static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
+{
+ dma_fence_enable_sw_signaling(fence);
+ return !dma_fence_is_signaled(fence);
+}
+
+static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
+ .get_driver_name = drm_syncobj_null_fence_get_name,
+ .get_timeline_name = drm_syncobj_null_fence_get_name,
+ .enable_signaling = drm_syncobj_null_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = NULL,
+};
+
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+{
+ struct drm_syncobj_null_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
+ &fence->lock, 0, 0);
+ dma_fence_signal(&fence->base);
+
+ drm_syncobj_replace_fence(syncobj, &fence->base);
+
+ dma_fence_put(&fence->base);
+
+ return 0;
+}
+
+int drm_syncobj_find_fence(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret = 0;
@@ -105,14 +237,14 @@ int drm_syncobj_fence_get(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
- *fence = dma_fence_get(syncobj->fence);
+ *fence = drm_syncobj_fence_get(syncobj);
if (!*fence) {
ret = -EINVAL;
}
drm_syncobj_put(syncobj);
return ret;
}
-EXPORT_SYMBOL(drm_syncobj_fence_get);
+EXPORT_SYMBOL(drm_syncobj_find_fence);
/**
* drm_syncobj_free - free a sync object.
@@ -125,13 +257,13 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- dma_fence_put(syncobj->fence);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle)
+ u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;
@@ -141,6 +273,16 @@ static int drm_syncobj_create(struct drm_file *file_private,
return -ENOMEM;
kref_init(&syncobj->refcount);
+ INIT_LIST_HEAD(&syncobj->cb_list);
+ spin_lock_init(&syncobj->lock);
+
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+ ret = drm_syncobj_assign_null_handle(syncobj);
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+ }
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
@@ -307,7 +449,7 @@ int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_fence_get(file_private, handle, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, &fence);
if (ret)
goto err_put_fd;
@@ -330,7 +472,6 @@ err_put_fd:
}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
- * @dev: drm_device which is being opened by userspace
* @file_private: drm file-private structure to set up
*
* Called at device open time, sets up the structure for handling refcounting
@@ -354,7 +495,6 @@ drm_syncobj_release_handle(int id, void *ptr, void *data)
/**
* drm_syncobj_release - release file-private sync object resources
- * @dev: drm_device which is being closed by userspace
* @file_private: drm file-private structure to clean up
*
* Called at close time when the filp is going away.
@@ -379,11 +519,11 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* no valid flags yet */
- if (args->flags)
+ if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create(file_private,
- &args->handle);
+ &args->handle, args->flags);
}
int
@@ -449,3 +589,368 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
}
+
+struct syncobj_wait_entry {
+ struct task_struct *task;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj_cb syncobj_cb;
+};
+
+static void syncobj_wait_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, fence_cb);
+
+ wake_up_process(wait->task);
+}
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, syncobj_cb);
+
+ /* This happens inside the syncobj lock */
+ wait->fence = dma_fence_get(syncobj->fence);
+ wake_up_process(wait->task);
+}
+
+static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint32_t count,
+ uint32_t flags,
+ signed long timeout,
+ uint32_t *idx)
+{
+ struct syncobj_wait_entry *entries;
+ struct dma_fence *fence;
+ signed long ret;
+ uint32_t signaled_count, i;
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ /* Walk the list of sync objects and initialize entries. We do
+ * this up-front so that we can properly return -EINVAL if there is
+ * a syncobj with a missing fence and then never have the chance of
+ * returning -EINVAL again.
+ */
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ entries[i].task = current;
+ entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!entries[i].fence) {
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ continue;
+ } else {
+ ret = -EINVAL;
+ goto cleanup_entries;
+ }
+ }
+
+ if (dma_fence_is_signaled(entries[i].fence)) {
+ if (signaled_count == 0 && idx)
+ *idx = i;
+ signaled_count++;
+ }
+ }
+
+ /* Initialize ret to the max of timeout and 1. That way, the
+ * default return value indicates a successful wait and not a
+ * timeout.
+ */
+ ret = max_t(signed long, timeout, 1);
+
+ if (signaled_count == count ||
+ (signaled_count > 0 &&
+ !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
+ goto cleanup_entries;
+
+ /* There's a very annoying laxness in the dma_fence API here, in
+ * that backends are not required to automatically report when a
+ * fence is signaled prior to fence->ops->enable_signaling() being
+ * called. So here if we fail to match signaled_count, we need to
+ * fall through and try a 0 timeout wait!
+ */
+
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ for (i = 0; i < count; ++i) {
+ drm_syncobj_fence_get_or_add_callback(syncobjs[i],
+ &entries[i].fence,
+ &entries[i].syncobj_cb,
+ syncobj_wait_syncobj_func);
+ }
+ }
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+
+ if (dma_fence_is_signaled(fence) ||
+ (!entries[i].fence_cb.func &&
+ dma_fence_add_callback(fence,
+ &entries[i].fence_cb,
+ syncobj_wait_fence_func))) {
+ /* The fence has been signaled */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
+ signaled_count++;
+ } else {
+ if (idx)
+ *idx = i;
+ goto done_waiting;
+ }
+ }
+ }
+
+ if (signaled_count == count)
+ goto done_waiting;
+
+ if (timeout == 0) {
+ /* If we are doing a 0 timeout wait and we got
+ * here, then we just timed out.
+ */
+ ret = 0;
+ goto done_waiting;
+ }
+
+ ret = schedule_timeout(ret);
+
+ if (ret > 0 && signal_pending(current))
+ ret = -ERESTARTSYS;
+ } while (ret > 0);
+
+done_waiting:
+ __set_current_state(TASK_RUNNING);
+
+cleanup_entries:
+ for (i = 0; i < count; ++i) {
+ if (entries[i].syncobj_cb.func)
+ drm_syncobj_remove_callback(syncobjs[i],
+ &entries[i].syncobj_cb);
+ if (entries[i].fence_cb.func)
+ dma_fence_remove_callback(entries[i].fence,
+ &entries[i].fence_cb);
+ dma_fence_put(entries[i].fence);
+ }
+ kfree(entries);
+
+ return ret;
+}
+
+/**
+ * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
+ *
+ * @timeout_nsec: absolute timeout in ns, 0 for poll
+ *
+ * Calculate the timeout in jiffies from an absolute time in sec/nsec.
+ */
+static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+{
+ ktime_t abs_timeout, now;
+ u64 timeout_ns, timeout_jiffies64;
+
+ /* a timeout of 0 means poll, since absolute 0 doesn't seem valid */
+ if (timeout_nsec == 0)
+ return 0;
+
+ abs_timeout = ns_to_ktime(timeout_nsec);
+ now = ktime_get();
+
+ if (!ktime_after(abs_timeout, now))
+ return 0;
+
+ timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
+
+ timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
+ /* clamp timeout to avoid infinite timeout */
+ if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
+ return MAX_SCHEDULE_TIMEOUT - 1;
+
+ return timeout_jiffies64 + 1;
+}
+
+static int drm_syncobj_array_wait(struct drm_device *dev,
+ struct drm_file *file_private,
+ struct drm_syncobj_wait *wait,
+ struct drm_syncobj **syncobjs)
+{
+ signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long ret = 0;
+ uint32_t first = ~0;
+
+ ret = drm_syncobj_array_wait_timeout(syncobjs,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (ret < 0)
+ return ret;
+
+ wait->first_signaled = first;
+ if (ret == 0)
+ return -ETIME;
+ return 0;
+}
+
+static int drm_syncobj_array_find(struct drm_file *file_private,
+ void *user_handles, uint32_t count_handles,
+ struct drm_syncobj ***syncobjs_out)
+{
+ uint32_t i, *handles;
+ struct drm_syncobj **syncobjs;
+ int ret;
+
+ handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
+ if (handles == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(handles, user_handles,
+ sizeof(uint32_t) * count_handles)) {
+ ret = -EFAULT;
+ goto err_free_handles;
+ }
+
+ syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
+ if (syncobjs == NULL) {
+ ret = -ENOMEM;
+ goto err_free_handles;
+ }
+
+ for (i = 0; i < count_handles; i++) {
+ syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
+ if (!syncobjs[i]) {
+ ret = -ENOENT;
+ goto err_put_syncobjs;
+ }
+ }
+
+ kfree(handles);
+ *syncobjs_out = syncobjs;
+ return 0;
+
+err_put_syncobjs:
+ while (i-- > 0)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+err_free_handles:
+ kfree(handles);
+
+ return ret;
+}
+
+static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
+ uint32_t count)
+{
+ uint32_t i;
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+}
+
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ args, syncobjs);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return 0;
+}
+
+int
+drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+ if (ret < 0)
+ break;
+ }
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
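From userspace the new wait ioctl takes an absolute CLOCK_MONOTONIC deadline in nanoseconds, matching drm_timeout_abs_to_jiffies() above. A hedged libdrm-style sketch waiting up to one second on an array of handles (fd, handles and count are assumptions):

    struct drm_syncobj_wait wait = {
            .handles = (uintptr_t)handles,              /* array of u32 handles */
            .count_handles = count,
            .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
    };
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    wait.timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
                        ts.tv_nsec + 1000000000ll;      /* now + 1s */

    if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
            return -errno;      /* a timeout surfaces as errno == ETIME */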
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index e9f33cd805dd..70f2b9593edc 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -31,6 +31,41 @@
#include "drm_trace.h"
#include "drm_internal.h"
+/**
+ * DOC: vblank handling
+ *
+ * Vertical blanking plays a major role in graphics rendering. To achieve
+ * tear-free display, users must synchronize page flips and/or rendering to
+ * vertical blanking. The DRM API offers ioctls to perform page flips
+ * synchronized to vertical blanking and wait for vertical blanking.
+ *
+ * The DRM core handles most of the vertical blanking management logic, which
+ * involves filtering out spurious interrupts, keeping race-free blanking
+ * counters, coping with counter wrap-around and resets and keeping use counts.
+ * It relies on the driver to generate vertical blanking interrupts and
+ * optionally provide a hardware vertical blanking counter.
+ *
+ * Drivers must initialize the vertical blanking handling core with a call to
+ * drm_vblank_init(). Minimally, a driver needs to implement
+ * &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call
+ * drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank
+ * support.
+ *
+ * Vertical blanking interrupts can be enabled by the DRM core or by drivers
+ * themselves (for instance to handle page flipping operations). The DRM core
+ * maintains a vertical blanking use count to ensure that the interrupts are not
+ * disabled while a user still needs them. To increment the use count, drivers
+ * call drm_crtc_vblank_get() and release the vblank reference again with
+ * drm_crtc_vblank_put(). In between these two calls vblank interrupts are
+ * guaranteed to be enabled.
+ *
+ * On many platforms, disabling the vblank interrupt cannot be done in a race-free
+ * manner, see &drm_driver.vblank_disable_immediate and
+ * &drm_driver.max_vblank_count. In that case the vblank core only disables the
+ * vblanks after a timer has expired, which can be configured through the
+ * ``vblankoffdelay`` module parameter.
+ */
+
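A hedged sketch of the minimal wiring the DOC comment describes; my_enable_vblank_irq(), my_disable_vblank_irq() and my_irq_was_vblank() stand in for hypothetical hardware accessors:

    static const struct drm_crtc_funcs my_crtc_funcs = {
            /* ... set_config, page_flip, etc. ... */
            .enable_vblank  = my_enable_vblank_irq,
            .disable_vblank = my_disable_vblank_irq,
    };

    static irqreturn_t my_irq_handler(int irq, void *arg)
    {
            struct my_device *mdev = arg;

            if (my_irq_was_vblank(mdev))
                    drm_crtc_handle_vblank(&mdev->crtc);

            return IRQ_HANDLED;
    }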
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -259,16 +294,17 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
}
/**
- * drm_accurate_vblank_count - retrieve the master vblank counter
+ * drm_crtc_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
*
- * This function is similar to @drm_crtc_vblank_count but this
- * function interpolates to handle a race with vblank irq's.
+ * This function is similar to drm_crtc_vblank_count() but this function
+ * interpolates to handle a race with vblank interrupts using the high precision
+ * timestamping support.
*
- * This is mostly useful for hardware that can obtain the scanout
- * position, but doesn't have a frame counter.
+ * This is mostly useful for hardware that can obtain the scanout position, but
+ * doesn't have a hardware frame counter.
*/
-u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
@@ -287,7 +323,7 @@ u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
return vblank;
}
-EXPORT_SYMBOL(drm_accurate_vblank_count);
+EXPORT_SYMBOL(drm_crtc_accurate_vblank_count);
static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -358,15 +394,6 @@ static void vblank_disable_fn(unsigned long arg)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-/**
- * drm_vblank_cleanup - cleanup vblank support
- * @dev: DRM device
- *
- * This function cleans up any resources allocated in drm_vblank_init.
- *
- * Drivers which don't use drm_irq_install() need to set &drm_device.irq_enabled
- * themselves, to signal to the DRM core that vblank interrupts are enabled.
- */
void drm_vblank_cleanup(struct drm_device *dev)
{
unsigned int pipe;
@@ -388,7 +415,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
-EXPORT_SYMBOL(drm_vblank_cleanup);
/**
* drm_vblank_init - initialize vblank support
@@ -396,6 +422,8 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
+ * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
+ * drivers with a &drm_driver.release callback.
*
* Returns:
* Zero on success or a negative error code on failure.
@@ -468,11 +496,11 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
- * Calculate and store various constants which are later
- * needed by vblank and swap-completion timestamping, e.g,
- * by drm_calc_vbltimestamp_from_scanoutpos(). They are
- * derived from CRTC's true scanout timing, so they take
- * things like panel scaling or other adjustments into account.
+ * Calculate and store various constants which are later needed by vblank and
+ * swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from the CRTC's
+ * true scanout timing, so they take things like panel scaling or other
+ * adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -535,25 +563,14 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* if flag is set.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be called from
- * within get_vblank_timestamp() implementation of a kms driver to implement the
- * actual timestamping.
- *
- * Should return timestamps conforming to the OML_sync_control OpenML
- * extension specification. The timestamp corresponds to the end of
- * the vblank interval, aka start of scanout of topmost-leftmost display
- * pixel in the following video frame.
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
+ * if &drm_driver.get_scanout_position is implemented.
*
- * Requires support for optional dev->driver->get_scanout_position()
- * in kms driver, plus a bit of setup code to provide a drm_display_mode
- * that corresponds to the true scanout timing.
- *
- * The current implementation only handles standard video modes. It
- * returns as no operation if a doublescan or interlaced video mode is
- * active. Higher level code is expected to handle this.
- *
- * This function can be used to implement the &drm_driver.get_vblank_timestamp
- * directly, if the driver implements the &drm_driver.get_scanout_position hook.
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
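/*
 * Example (illustrative): a KMS driver that implements the scanout
 * position hook can plug this helper in directly;
 * my_get_scanout_position() is a stand-in for the driver's own callback.
 */
static struct drm_driver my_driver = {
	/* ... */
	.get_scanout_position = my_get_scanout_position,
	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};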
@@ -738,7 +755,9 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
- * modesetting activity.
+ * modesetting activity. Note that the returned value isn't correct when a
+ * vblank interrupt races it (it only reports the software vblank counter); see
+ * drm_crtc_accurate_vblank_count() for such use-cases.
*
* Returns:
* The software vblank counter.
@@ -749,20 +768,6 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
-/**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
- * system timestamp corresponding to that vblank counter value.
- * @dev: DRM device
- * @pipe: index of CRTC whose counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the legacy version of drm_crtc_vblank_count_and_time().
- */
static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
@@ -831,7 +836,7 @@ static void send_vblank_event(struct drm_device *dev,
* NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
* atomic commit must ensure that the next vblank happens at exactly the same
* time as the atomic commit is committed to the hardware. This function itself
- * does **not** protect again the next vblank interrupt racing with either this
+ * does **not** protect against the next vblank interrupt racing with either this
* function call or the atomic commit operation. A possible sequence could be:
*
* 1. Driver commits new hardware state into vblank-synchronized registers.
@@ -852,8 +857,8 @@ static void send_vblank_event(struct drm_device *dev,
* handler by calling drm_crtc_send_vblank_event() and make sure that there's no
* possible race with the hardware committing the atomic update.
*
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
+ * Caller must hold a vblank reference for the event @e, which will be dropped
+ * when the next vblank arrives.
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
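/*
 * Example (sketch of the pattern described above, hypothetical commit
 * code): arm the completion event right after the state has been
 * committed to the hardware, falling back to sending it immediately if
 * no vblank reference can be taken.
 */
static void my_commit_done(struct drm_crtc *crtc,
			   struct drm_crtc_state *state)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (state->event) {
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, state->event);
		else
			drm_crtc_send_vblank_event(crtc, state->event);
		state->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}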
@@ -913,14 +918,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
return dev->driver->enable_vblank(dev, pipe);
}
-/**
- * drm_vblank_enable - enable the vblank interrupt on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -958,19 +955,6 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
return ret;
}
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * This is the legacy version of drm_crtc_vblank_get().
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1014,16 +998,6 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
-/**
- * drm_vblank_put - release ownership of vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to release
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the legacy version of drm_crtc_vblank_put().
- */
static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1067,6 +1041,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
* This waits for one vblank to pass on @pipe, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
* due to lack of driver support or because the crtc is off.
+ *
+ * This is the legacy version of drm_crtc_wait_one_vblank().
*/
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
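/*
 * Example (illustrative): new code should prefer the CRTC-based variant;
 * letting one full frame scan out after enabling a pipe looks like this.
 */
drm_crtc_vblank_on(crtc);
drm_crtc_wait_one_vblank(crtc);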
@@ -1116,7 +1092,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
* stored so that drm_vblank_on can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
+ * reset, e.g. when suspending or disabling the @crtc in general.
*/
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
@@ -1184,6 +1160,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
* drm_crtc_vblank_on() functions. The difference compared to
* drm_crtc_vblank_off() is that this function doesn't save the vblank counter
* and hence doesn't need to call any driver hooks.
+ *
+ * This is useful for recovering driver state, e.g. on driver load or on resume.
*/
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
@@ -1212,9 +1190,10 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
- * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
+ * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
+ * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
+ * unbalanced and so can also be unconditionally called in driver load code to
+ * reflect the current hardware state of the crtc.
*/
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
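/*
 * Example (sketch): atomic drivers typically bracket their CRTC
 * enable/disable hooks with these calls so the vblank code tracks the
 * hardware state; my_hw_enable()/my_hw_disable() are hypothetical.
 */
static void my_crtc_atomic_enable(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_state)
{
	my_hw_enable(crtc);
	drm_crtc_vblank_on(crtc);
}

static void my_crtc_atomic_disable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_off(crtc);
	my_hw_disable(crtc);
}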
@@ -1299,8 +1278,8 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
}
}
-int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned int pipe;
@@ -1419,22 +1398,8 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
_DRM_VBLANK_NEXTONMISS));
}
-/*
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
- * after a timeout with no further vblank waits scheduled).
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1170b3209a12..2660543ad86a 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -40,6 +40,7 @@
#include <linux/efi.h>
#include <linux/slab.h>
#endif
+#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"
@@ -58,6 +59,9 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+ /* We don't want graphics memory to be mapped encrypted */
+ tmp = pgprot_decrypted(tmp);
+
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
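/*
 * Example (illustrative, mirroring the change above): any driver that
 * maps device memory to userspace on an SME-enabled system clears the
 * encryption bit before layering on caching attributes.
 */
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));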
@@ -631,7 +635,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index d9100b565198..28f1226576f8 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -147,7 +147,7 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
struct rb_node *iter;
unsigned long offset;
- iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
+ iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
best = NULL;
while (likely(iter)) {
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 71cee4e9fefb..38b477b5fbf9 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -10,6 +10,8 @@ config DRM_ETNAVIV
select IOMMU_API
select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
help
DRM driver for Vivante GPUs.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 91e17aeee1da..2cb4773823c2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -316,7 +316,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
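/*
 * Example (sketch of the lookup/use/put pattern converted throughout
 * this series): drm_gem_object_get()/drm_gem_object_put_unlocked() are
 * the new names for the old *_reference()/*_unreference_unlocked()
 * helpers, with unchanged semantics.
 */
struct drm_gem_object *obj = drm_gem_object_lookup(file, args->handle);

if (!obj)
	return -ENOENT;
/* ... operate on obj ... */
drm_gem_object_put_unlocked(obj);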
@@ -337,7 +337,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -357,7 +357,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = etnaviv_gem_mmap_offset(obj, &args->offset);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -446,7 +446,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 9a3bea738330..5a634594a6ce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -68,7 +68,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
return PTR_ERR(p);
}
@@ -265,7 +265,7 @@ void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
mutex_lock(&etnaviv_obj->lock);
WARN_ON(mapping->use == 0);
@@ -282,7 +282,7 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
mapping->use -= 1;
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
@@ -358,7 +358,7 @@ out:
return ERR_PTR(ret);
/* Take a reference on the object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return mapping;
}
@@ -413,6 +413,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
bool write = !!(op & ETNA_PREP_WRITE);
int ret;
+ if (!etnaviv_obj->sgt) {
+ void *ret;
+
+ mutex_lock(&etnaviv_obj->lock);
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
if (op & ETNA_PREP_NOSYNC) {
if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
write))
@@ -427,16 +437,6 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- if (!etnaviv_obj->sgt) {
- void *ret;
-
- mutex_lock(&etnaviv_obj->lock);
- ret = etnaviv_gem_get_pages(etnaviv_obj);
- mutex_unlock(&etnaviv_obj->lock);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- }
-
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
@@ -662,7 +662,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
* going to pin these pages.
*/
mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
if (ret)
@@ -671,7 +672,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -688,14 +689,14 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -712,7 +713,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -800,7 +801,7 @@ static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
}
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
mmput(work->mm);
put_task_struct(work->task);
@@ -858,7 +859,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
get_task_struct(current);
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
work->mm = mm;
work->task = current;
@@ -924,6 +925,6 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index e5da4f2300ba..ae884723e9b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -146,7 +146,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
return &etnaviv_obj->base;
fail:
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 6463fc2c736f..026ef4e02f85 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -37,7 +37,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
struct etnaviv_gem_submit *submit;
size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
- submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
submit->gpu = gpu;
@@ -88,7 +88,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
* Take a refcount on the object. The file table lock
* prevents the object_idr's refcount on this being dropped.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = to_etnaviv_bo(obj);
}
@@ -291,7 +291,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit_unlock_object(submit, i);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
ww_acquire_fini(&submit->ticket);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ada45fdd0eae..fc9a6a83dfc7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1622,10 +1622,12 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
- gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
- if (IS_ERR(gpu->cooling))
- return PTR_ERR(gpu->cooling);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+ }
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5792ca88ab7a..730b8d9db187 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -33,9 +34,8 @@
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
-#define IFTYPE_I80 (1 << 0)
-#define I80_HW_TRG (1 << 1)
-#define IFTYPE_HDMI (1 << 2)
+#define I80_HW_TRG (1 << 0)
+#define IFTYPE_HDMI (1 << 1)
static const char * const decon_clks_name[] = {
"pclk",
@@ -57,6 +57,8 @@ struct decon_context {
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
unsigned int irq;
+ unsigned int irq_vsync;
+ unsigned int irq_lcd_sys;
unsigned int te_irq;
unsigned long out_type;
int first_win;
@@ -90,7 +92,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
u32 val;
val = VIDINTCON0_INTEN;
- if (ctx->out_type & IFTYPE_I80)
+ if (crtc->i80_mode)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
@@ -139,7 +141,7 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
case VIDCON1_VSTATUS_VS:
- if (!(ctx->out_type & IFTYPE_I80))
+ if (!(ctx->crtc->i80_mode))
--frm;
break;
case VIDCON1_VSTATUS_BP:
@@ -166,7 +168,7 @@ static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+ if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
@@ -206,7 +208,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
val = VIDOUT_LCD_ON;
if (interlaced)
val |= VIDOUT_INTERLACE_EN_F;
- if (ctx->out_type & IFTYPE_I80) {
+ if (crtc->i80_mode) {
val |= VIDOUT_COMMAND_IF;
} else {
val |= VIDOUT_RGB_IF;
@@ -222,7 +224,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
- if (!(ctx->out_type & IFTYPE_I80)) {
+ if (!crtc->i80_mode) {
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
@@ -277,16 +279,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_A8888;
val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_ERROR("Proper pixel format is not set\n");
- return;
}
- DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -329,7 +329,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
@@ -365,11 +365,11 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (!(ctx->out_type & IFTYPE_HDMI))
- val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
- | BIT_VAL(state->crtc.w * bpp, 13, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 27, 14)
+ | BIT_VAL(state->crtc.w * cpp, 13, 0);
else
- val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
- | BIT_VAL(state->crtc.w * bpp, 14, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 29, 15)
+ | BIT_VAL(state->crtc.w * cpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
decon_win_set_pixfmt(ctx, win, fb);
@@ -407,24 +407,19 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
static void decon_swreset(struct decon_context *ctx)
{
- unsigned int tries;
unsigned long flags;
+ u32 val;
+ int ret;
writel(0, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_STOP_STATUS)
- break;
- udelay(10);
- }
+ readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_STOP_STATUS, 12, 20000);
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
- break;
- udelay(10);
- }
+ ret = readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_SWRESET, 12, 20000);
- WARN(tries == 0, "failed to software reset DECON\n");
+ WARN(ret < 0, "failed to software reset DECON\n");
spin_lock_irqsave(&ctx->vblank_lock, flags);
ctx->frame_id = 0;
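/*
 * Example (generic sketch of the helper adopted above, hypothetical
 * register names): poll until a condition holds, with the delay and
 * timeout given in microseconds; the return value is 0 or -ETIMEDOUT.
 */
u32 val;
int ret = readl_poll_timeout(base + MY_STATUS_REG, val,
			     val & MY_READY_BIT, 12, 20000);
if (ret)
	dev_warn(dev, "device did not become ready\n");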
@@ -515,6 +510,22 @@ err:
clk_disable_unprepare(ctx->clks[i]);
}
+static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ ctx->irq = crtc->i80_mode ? ctx->irq_lcd_sys : ctx->irq_vsync;
+
+ if (ctx->irq)
+ return MODE_OK;
+
+ dev_info(ctx->dev, "Sink requires %s mode, but the appropriate interrupt is not provided.\n",
+ crtc->i80_mode ? "command" : "video");
+
+ return MODE_BAD;
+}
+
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
@@ -524,6 +535,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
+ .mode_valid = decon_mode_valid,
.atomic_flush = decon_atomic_flush,
};
@@ -674,19 +686,22 @@ static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int decon_conf_irq(struct decon_context *ctx, const char *name,
- irq_handler_t handler, unsigned long int flags, bool required)
+ irq_handler_t handler, unsigned long int flags)
{
struct platform_device *pdev = to_platform_device(ctx->dev);
int ret, irq = platform_get_irq_byname(pdev, name);
if (irq < 0) {
- if (irq == -EPROBE_DEFER)
+ switch (irq) {
+ case -EPROBE_DEFER:
return irq;
- if (required)
- dev_err(ctx->dev, "cannot get %s IRQ\n", name);
- else
- irq = 0;
- return irq;
+ case -ENODATA:
+ case -ENXIO:
+ return 0;
+ default:
+ dev_err(ctx->dev, "failed to get IRQ %s: %d\n", name, irq);
+ return irq;
+ }
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
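/*
 * Example (illustrative caller, matching the probe code below): with the
 * reworked helper every interrupt can be requested unconditionally, and
 * a return value of 0 simply means the optional IRQ is not wired up.
 */
ret = decon_conf_irq(ctx, "te", decon_te_irq_handler, IRQF_TRIGGER_RISING);
if (ret < 0)
	return ret;
ctx->te_irq = ret;	/* 0 if the optional TE interrupt is absent */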
@@ -714,11 +729,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
- if (ctx->out_type & IFTYPE_HDMI) {
+ if (ctx->out_type & IFTYPE_HDMI)
ctx->first_win = 1;
- } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
- ctx->out_type |= IFTYPE_I80;
- }
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -742,25 +754,23 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
return PTR_ERR(ctx->addr);
}
- if (ctx->out_type & IFTYPE_I80) {
- ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0, true);
- if (ret < 0)
- return ret;
- ctx->irq = ret;
+ ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_vsync = ret;
- ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
- IRQF_TRIGGER_RISING, false);
- if (ret < 0)
- return ret;
- if (ret) {
- ctx->te_irq = ret;
- ctx->out_type &= ~I80_HW_TRG;
- }
- } else {
- ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);
- if (ret < 0)
+ ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_lcd_sys = ret;
+
+ ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
+ IRQF_TRIGGER_RISING);
+ if (ret < 0)
return ret;
- ctx->irq = ret;
+ if (ret) {
+ ctx->te_irq = ret;
+ ctx->out_type &= ~I80_HW_TRG;
}
if (ctx->out_type & I80_HW_TRG) {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3e88269fdc2e..615efcf7782a 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -309,19 +309,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCONx_BPPMODE_24BPP_xRGB;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -398,7 +393,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
@@ -418,7 +413,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (pitch / bpp) - fb->width;
+ padding = (pitch / cpp) - fb->width;
/* buffer size */
writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 385537b726a6..39629e7a80b9 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -155,7 +155,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder;
struct drm_device *drm_dev = data;
- int pipe, ret;
+ int ret;
/*
* Just like the probe function said, we don't need the
@@ -179,20 +179,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
dp->plat_data.encoder = encoder;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index edbd98ff293e..b0c0621fcdf7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -13,6 +13,7 @@
*/
#include <drm/drmP.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index d72777f6411a..6ce0821590df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -16,12 +16,14 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_encoder.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_plane.h"
-static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
+static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -31,7 +33,8 @@ static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -81,12 +84,24 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
exynos_crtc->ops->atomic_flush(exynos_crtc);
}
+static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_valid)
+ return exynos_crtc->ops->mode_valid(exynos_crtc, mode);
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
- .enable = exynos_drm_crtc_enable,
- .disable = exynos_drm_crtc_disable,
+ .mode_valid = exynos_crtc_mode_valid,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
+ .atomic_enable = exynos_drm_crtc_atomic_enable,
+ .atomic_disable = exynos_drm_crtc_atomic_disable,
};
void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
@@ -189,16 +204,30 @@ err_crtc:
return ERR_PTR(ret);
}
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev)
if (to_exynos_crtc(crtc)->type == out_type)
- return drm_crtc_index(crtc);
+ return to_exynos_crtc(crtc);
- return -EPERM;
+ return ERR_PTR(-EPERM);
+}
+
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type)
+{
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(encoder->dev,
+ out_type);
+
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+
+ return 0;
}
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index ef58b64e3d2d..dec446109e6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,21 +15,25 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
+
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- enum exynos_drm_output_type type,
+ enum exynos_drm_output_type out_type,
const struct exynos_drm_crtc_ops *ops,
void *context);
void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
struct exynos_drm_plane *exynos_plane);
-/* This function gets pipe value to crtc device matched with out_type. */
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+/* This function gets crtc device matched with out_type. */
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type);
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type);
+
/*
* This function calls the crtc device(manager)'s te_handler() callback
* to trigger to transfer video image at the tearing effect synchronization
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 63abcd280fa0..66945e0dc57f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -59,7 +59,6 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dpi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dpi_connector_destroy,
@@ -203,19 +202,15 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dpi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 242bd50faa26..b1f7299600f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = {
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
- .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -395,8 +393,9 @@ static int exynos_drm_bind(struct device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm);
- /* force connectors detection */
- drm_helper_hpd_irq_event(drm);
+ ret = exynos_drm_fbdev_init(drm);
+ if (ret)
+ goto err_cleanup_poll;
/* register the DRM device */
ret = drm_dev_register(drm, 0);
@@ -407,6 +406,7 @@ static int exynos_drm_bind(struct device *dev)
err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
+err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a93de321706b..cf131c2aa23e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -91,6 +91,7 @@ struct exynos_drm_plane {
#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
+#define EXYNOS_DRM_PLANE_CAP_TILE (1 << 3)
/*
* Exynos DRM plane configuration structure.
@@ -117,6 +118,7 @@ struct exynos_drm_plane_config {
* @disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @mode_valid: specific driver callback for mode validation
* @atomic_check: validate state
* @atomic_begin: prepare device to receive an update
* @atomic_flush: mark the end of device update
@@ -132,6 +134,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
+ enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -162,6 +166,7 @@ struct exynos_drm_crtc {
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
+ bool i80_mode : 1;
};
static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index b6a46d9a016e..7904ffa9abfb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -254,7 +254,6 @@ struct exynos_dsi {
struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -1329,12 +1328,13 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
return 0;
}
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
+ struct device *panel)
{
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
if (dsi->te_gpio == -ENOENT)
return 0;
@@ -1374,85 +1374,6 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
}
}
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel_node = device->dev.of_node;
-
- /*
- * This is a temporary solution and should be made by more generic way.
- *
- * If attached panel device is for command mode one, dsi should register
- * TE interrupt handler.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- int ret = exynos_dsi_register_te_irq(dsi);
-
- if (ret)
- return ret;
- }
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- dsi->panel_node = NULL;
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1508,25 +1429,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- if (!dsi->panel) {
- dsi->panel = of_drm_find_panel(dsi->panel_node);
- if (dsi->panel)
- drm_panel_attach(dsi->panel, &dsi->connector);
- } else if (!dsi->panel_node) {
- struct drm_encoder *encoder;
-
- encoder = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_disable(encoder);
- drm_panel_detach(dsi->panel);
- dsi->panel = NULL;
- }
-
- if (dsi->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
+ return connector->status;
}
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
@@ -1537,7 +1440,6 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dsi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dsi_connector_destroy,
@@ -1576,6 +1478,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return ret;
}
+ connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1612,14 +1515,112 @@ static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ /*
+ * This is a temporary solution and should be reworked in a more generic
+ * way.
+ *
+ * If the attached panel device is a command mode one, the dsi host should
+ * register a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ dsi->connector.status = connector_status_connected;
+ }
+ exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
+ !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ if (dsi->panel) {
+ exynos_dsi_disable(&dsi->encoder);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ dsi->connector.status = connector_status_disconnected;
+ }
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ exynos_dsi_unregister_te_irq(dsi);
+
+ return 0;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
+
static int exynos_dsi_of_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
int ret = of_property_read_u32(np, propname, out_value);
if (ret < 0)
- pr_err("%s: failed to get '%s' property\n", np->full_name,
- propname);
+ pr_err("%pOF: failed to get '%s' property\n", np, propname);
return ret;
}
@@ -1664,20 +1665,15 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dsi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 73217c281c9a..8208df56a88f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,33 +199,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_atomic_commit_tail(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- /*
- * Exynos can't update planes with CRTCs and encoders disabled,
- * its updates routines, specially for FIMD, requires the clocks
- * to be enabled. So it is necessary to handle the modeset operations
- * *before* the commit_planes() step, this way it will always
- * have the relevant clocks enabled to perform the update.
- */
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_commit_hw_done(state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-}
-
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
- .atomic_commit_tail = exynos_drm_atomic_commit_tail,
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
@@ -250,4 +225,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
+
+ dev->mode_config.allow_fb_modifiers = true;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 641531243e04..c3a068409b48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -183,24 +183,6 @@ static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
-static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
-{
- struct drm_connector *connector;
- bool ret = false;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->status != connector_status_connected)
- continue;
-
- ret = true;
- break;
- }
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int exynos_drm_fbdev_init(struct drm_device *dev)
{
struct exynos_drm_fbdev *fbdev;
@@ -211,9 +193,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
return 0;
- if (!exynos_drm_fbdev_is_anything_connected(dev))
- return 0;
-
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
return -ENOMEM;
@@ -304,8 +283,5 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
struct exynos_drm_private *private = dev->dev_private;
struct drm_fb_helper *fb_helper = private->fb_helper;
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
+ drm_fb_helper_hotplug_event(fb_helper);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 60f93cad6643..d42ae2bc3e56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -583,18 +583,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
/*
@@ -718,13 +712,13 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
- offset = state->src.x * bpp;
+ offset = state->src.x * cpp;
offset += state->src.y * pitch;
/* buffer start address */
@@ -743,8 +737,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
state->crtc.w, state->crtc.h);
/* buffer size */
- buf_offsize = pitch - (state->crtc.w * bpp);
- line_size = state->crtc.w * bpp;
+ buf_offsize = pitch - (state->crtc.w * cpp);
+ line_size = state->crtc.w * cpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index c23479be4850..077de014d610 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
{
struct drm_exynos_gem_map *args = data;
- return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
@@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret = 0;
-
- /*
- * get offset of memory allocated for drm framebuffer.
- * - this callback would be called by user application
- * with DRM_IOCTL_MODE_MAP_DUMB command.
- */
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
- DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
int exynos_drm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 85457255fcd1..e86d1a9518c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 16bbee897e0d..ba4a32b132ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,9 +21,12 @@
#include <linux/component.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "exynos_drm_drv.h"
+
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
#define MIC0_RGB_MUX (1 << 0)
@@ -85,12 +88,6 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-enum {
- ENDPOINT_DECON_NODE,
- ENDPOINT_DSI_NODE,
- NUM_ENDPOINTS
-};
-
static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
@@ -229,36 +226,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static int parse_dt(struct exynos_mic *mic)
-{
- int ret = 0, i, j;
- struct device_node *remote_node;
- struct device_node *nodes[3];
-
- /*
- * The order of endpoints does matter.
- * The first node must be for decon and the second one must be for dsi.
- */
- for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
- remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
- if (!remote_node) {
- ret = -EPIPE;
- goto exit;
- }
- nodes[j++] = remote_node;
-
- if (i == ENDPOINT_DECON_NODE &&
- of_get_child_by_name(remote_node, "i80-if-timings"))
- mic->i80_mode = 1;
- }
-
-exit:
- while (--j > -1)
- of_node_put(nodes[j]);
-
- return ret;
-}
-
static void mic_disable(struct drm_bridge *bridge) { }
static void mic_post_disable(struct drm_bridge *bridge)
@@ -286,6 +253,7 @@ static void mic_mode_set(struct drm_bridge *bridge,
mutex_lock(&mic_mutex);
drm_display_mode_to_videomode(mode, &mic->vm);
+ mic->i80_mode = to_exynos_crtc(bridge->encoder->crtc)->i80_mode;
mutex_unlock(&mic_mutex);
}
@@ -417,10 +385,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->dev = dev;
- ret = parse_dt(mic);
- if (ret)
- goto err;
-
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
DRM_ERROR("mic: Failed to get mem region for MIC\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 611b6fd65433..d2a90dae5c71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -173,13 +173,35 @@ static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = exynos_drm_plane_reset,
.atomic_duplicate_state = exynos_drm_plane_duplicate_state,
.atomic_destroy_state = exynos_drm_plane_destroy_state,
};
static int
+exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
+ struct exynos_drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->base.fb;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+ if (!(config->capabilities & EXYNOS_DRM_PLANE_CAP_TILE))
+ return -ENOTSUPP;
+ break;
+
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+
+ default:
+ DRM_ERROR("unsupported pixel format modifier");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
@@ -223,6 +245,10 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
/* translate state into exynos_state */
exynos_plane_mode_set(exynos_state);
+ ret = exynos_drm_plane_check_format(exynos_plane->config, exynos_state);
+ if (ret)
+ return ret;
+
ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
return ret;
}
@@ -283,7 +309,7 @@ int exynos_plane_init(struct drm_device *dev,
&exynos_plane_funcs,
config->pixel_formats,
config->num_pixel_formats,
- config->type, NULL);
+ NULL, config->type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
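
(For context: the extra NULL threaded into drm_universal_plane_init() here is the new format_modifiers argument, a DRM_FORMAT_MOD_INVALID-terminated list of supported modifiers. A minimal sketch of what a driver advertising the tiled modifier checked above could pass instead of NULL — illustrative only, not part of this patch; the exynos_plane_modifiers name is hypothetical:

	static const u64 exynos_plane_modifiers[] = {
		DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID,	/* sentinel terminating the list */
	};

	/* ... then, in exynos_plane_init(): */
	err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
				       &exynos_plane_funcs,
				       config->pixel_formats,
				       config->num_pixel_formats,
				       exynos_plane_modifiers,
				       config->type, NULL);
)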
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index cb8a72842537..53e03f8af3d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -289,7 +289,6 @@ static void vidi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs vidi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = vidi_detect,
.destroy = vidi_connector_destroy,
@@ -382,7 +381,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_plane *exynos_plane;
struct exynos_drm_plane_config plane_config = { 0 };
unsigned int i;
- int pipe, ret;
+ int ret;
ctx->drm_dev = drm_dev;
@@ -407,20 +406,15 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_VIDI);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_VIDI);
+ if (ret < 0)
+ return ret;
+
ret = vidi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index d3b69d66736f..214fa5e51963 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -784,7 +784,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
- &hdata->current_mode);
+ &hdata->current_mode, false);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
@@ -835,7 +835,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = hdmi_detect,
.destroy = hdmi_connector_destroy,
@@ -1698,32 +1697,25 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_encoder *encoder = &hdata->encoder;
- struct exynos_drm_crtc *exynos_crtc;
- struct drm_crtc *crtc;
- int ret, pipe;
+ struct exynos_drm_crtc *crtc;
+ int ret;
hdata->drm_dev = drm_dev;
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_HDMI);
- if (pipe < 0)
- return pipe;
-
hdata->phy_clk.enable = hdmiphy_clk_enable;
- crtc = drm_crtc_from_index(drm_dev, pipe);
- exynos_crtc = to_exynos_crtc(crtc);
- exynos_crtc->pipe_clk = &hdata->phy_clk;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (ret < 0)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ crtc->pipe_clk = &hdata->phy_clk;
+
ret = hdmi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a998a8dd783c..002755415e00 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -148,7 +148,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.pixel_formats = vp_formats,
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_TILE,
},
};
@@ -483,29 +484,18 @@ static void vp_video_buffer(struct mixer_context *ctx,
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
- bool tiled_mode = false;
- bool crcb_mode = false;
+ bool is_tiled, is_nv21;
u32 val;
- switch (fb->format->format) {
- case DRM_FORMAT_NV12:
- crcb_mode = false;
- break;
- case DRM_FORMAT_NV21:
- crcb_mode = true;
- break;
- default:
- DRM_ERROR("pixel format for vp is wrong [%d].\n",
- fb->format->format);
- return;
- }
+ is_nv21 = (fb->format->format == DRM_FORMAT_NV21);
+ is_tiled = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- if (tiled_mode) {
+ if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
@@ -525,14 +515,14 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
- val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
- val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
- /* chroma height has to reduced by 2 to avoid chroma distorions */
+ /* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
@@ -594,7 +584,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -616,12 +606,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ default:
fmt = MXR_FORMAT_ARGB8888;
break;
-
- default:
- DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
- return;
}
/* ratio is already checked by common plane code */
@@ -631,12 +618,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
dst_x_offset = state->crtc.x;
dst_y_offset = state->crtc.y;
- /* converting dma address base and source offset */
+	/* translate dma address base so that the source image offset is zero */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- src_x_offset = 0;
- src_y_offset = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
@@ -667,11 +652,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
- /* setup offsets in source image */
- val = MXR_GRP_SXY_SX(src_x_offset);
- val |= MXR_GRP_SXY_SY(src_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
-
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
@@ -748,6 +728,10 @@ static void mixer_win_reset(struct mixer_context *ctx)
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ /* set all source image offsets to zero */
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+
spin_unlock_irqrestore(&res->reg_slock, flags);
}
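
(The graphics-window change above is easiest to read as address arithmetic: rather than programming a source offset register, the (src.x, src.y) origin is folded into the DMA base so the engine always scans from (0,0). A sketch of the computation, with cpp and pitch standing for bytes per pixel and bytes per scanline:

	dma_addr = fb_base
		 + src_x * cpp		/* advance src_x pixels into the line */
		 + src_y * pitch;	/* advance src_y full scanlines */

This is why mixer_win_reset() can simply pin MXR_GRAPHIC_SXY to zero for both windows.)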
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index cc4e944a1d3c..0e3752437e44 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -63,7 +63,8 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable_unprepare(fsl_dev->pix_clk);
}
-static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -133,7 +134,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
.atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
- .enable = fsl_dcu_drm_crtc_enable,
+ .atomic_enable = fsl_dcu_drm_crtc_atomic_enable,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
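
(This is part of the tree-wide migration from the old .enable/.disable CRTC helpers to the atomic variants; the new callbacks also receive the old drm_crtc_state, so a driver can inspect the state it is leaving. A minimal sketch of the shape, with illustrative names:

	static void example_crtc_atomic_enable(struct drm_crtc *crtc,
					       struct drm_crtc_state *old_state)
	{
		/* clocks on, timing generator started, vblank enabled ... */
	}

	static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		.atomic_enable	= example_crtc_atomic_enable,
		.atomic_disable	= example_crtc_atomic_disable,
	};

The same substitution repeats below for hibmc and kirin/ade.)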
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 5cbde196895a..58e9e0601a61 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -176,8 +176,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 0a20723aa6e1..9554b245746e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -224,7 +224,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
&fsl_dcu_drm_plane_funcs,
fsl_dcu_drm_plane_formats,
ARRAY_SIZE(fsl_dcu_drm_plane_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index dcbf3c06e1d8..edd7d8127d19 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -63,7 +63,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.destroy = fsl_dcu_drm_connector_destroy,
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
};
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 7da70b6c83f0..2570c7f647a6 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -479,26 +479,6 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return psb_framebuffer_create(dev, cmd, r);
}
-static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-
- gma_crtc->lut_r[regno] = red >> 8;
- gma_crtc->lut_g[regno] = green >> 8;
- gma_crtc->lut_b[regno] = blue >> 8;
-}
-
-static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, int regno)
-{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-
- *red = gma_crtc->lut_r[regno] << 8;
- *green = gma_crtc->lut_g[regno] << 8;
- *blue = gma_crtc->lut_b[regno] << 8;
-}
-
static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -525,8 +505,6 @@ static int psbfb_probe(struct drm_fb_helper *helper,
}
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
- .gamma_set = psbfb_gamma_set,
- .gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
};
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 7da061aab729..131239759a75 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
}
/**
- * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
- * @file: our drm client file
- * @dev: drm device
- * @handle: GEM handle to the object (from dumb_create)
- *
- * Do the necessary setup to allow the mapping of the frame buffer
- * into user memory. We don't have to do much here at the moment.
- */
-int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- int ret = 0;
- struct drm_gem_object *obj;
-
- /* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- /* Make it mmapable */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-out:
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
-/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
* @dev: our device
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index e7fd356acf2e..f3c48a2be71b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -144,33 +144,32 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
int palreg = map->palette;
+ u16 *r, *g, *b;
int i;
/* The clocks have to be on to load the palette. */
if (!crtc->enabled)
return;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
if (gma_power_begin(dev, false)) {
for (i = 0; i < 256; i++) {
REG_WRITE(palreg + 4 * i,
- ((gma_crtc->lut_r[i] +
- gma_crtc->lut_adj[i]) << 16) |
- ((gma_crtc->lut_g[i] +
- gma_crtc->lut_adj[i]) << 8) |
- (gma_crtc->lut_b[i] +
- gma_crtc->lut_adj[i]));
+ (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
+ (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
+ ((*b++ >> 8) + gma_crtc->lut_adj[i]));
}
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
dev_priv->regs.pipe[0].palette[i] =
- ((gma_crtc->lut_r[i] +
- gma_crtc->lut_adj[i]) << 16) |
- ((gma_crtc->lut_g[i] +
- gma_crtc->lut_adj[i]) << 8) |
- (gma_crtc->lut_b[i] +
- gma_crtc->lut_adj[i]);
+ (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
+ (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
+ ((*b++ >> 8) + gma_crtc->lut_adj[i]);
}
}
@@ -180,15 +179,6 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
u32 size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int i;
-
- for (i = 0; i < size; i++) {
- gma_crtc->lut_r[i] = red[i] >> 8;
- gma_crtc->lut_g[i] = green[i] >> 8;
- gma_crtc->lut_b[i] = blue[i] >> 8;
- }
-
gma_crtc_load_lut(crtc);
return 0;
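
(The gamma rework reads the ramps straight out of the core's crtc->gamma_store instead of driver-private lut_r/g/b copies. As the hunk above shows, the store is one contiguous u16 array with the red, green and blue ramps back to back, each crtc->gamma_size entries long; a sketch of walking it, where write_hw_lut() is a hypothetical register-write helper:

	u16 *r = crtc->gamma_store;
	u16 *g = r + crtc->gamma_size;
	u16 *b = g + crtc->gamma_size;
	int i;

	for (i = 0; i < crtc->gamma_size; i++)
		/* ramp entries are 16-bit; this hardware wants 8 bits */
		write_hw_lut(i, r[i] >> 8, g[i] >> 8, b[i] >> 8);
)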
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 1616af209bfc..c50534c923df 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -520,7 +520,7 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
{
unsigned long flags;
- struct drm_device *dev = sender->dev;
+ struct drm_device *dev;
int i;
u32 gen_data_reg;
int retry = MDFLD_DSI_READ_MAX_COUNT;
@@ -530,6 +530,8 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
return -EINVAL;
}
+ dev = sender->dev;
+
/**
* do reading.
* 0) send out generic read request
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 63c6e08600ae..531e4450c000 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -737,11 +737,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
sizeof(struct drm_display_mode));
list_for_each_entry(connector, &mode_config->connector_list, head) {
- if (!connector)
- continue;
-
encoder = connector->encoder;
-
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1f9b35afefee..37a3be71acd9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -480,7 +480,6 @@ static struct drm_driver driver = {
.load = psb_driver_load,
.unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
- .set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.irq_preinstall = psb_irq_preinstall,
@@ -495,8 +494,6 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
- .dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
@@ -517,12 +514,12 @@ static struct pci_driver psb_pci_driver = {
static int __init psb_init(void)
{
- return drm_pci_init(&driver, &psb_pci_driver);
+ return pci_register_driver(&psb_pci_driver);
}
static void __exit psb_exit(void)
{
- drm_pci_exit(&driver, &psb_pci_driver);
+ pci_unregister_driver(&psb_pci_driver);
}
late_initcall(psb_init);
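
(With the DRM midlayer PCI wrappers gone, a modern KMS driver registers its pci_driver directly; only legacy UMS drivers keep a wrapper (drm_legacy_pci_init(), as in the i810 hunk below). A sketch of the resulting boilerplate, with example_* as placeholder names:

	static int __init example_init(void)
	{
		return pci_register_driver(&example_pci_driver);
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&example_pci_driver);
	}

	module_init(example_init);
	module_exit(example_exit);
)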
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83667087d6e5..821497dbd3fc 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 7b6c84925098..8762efaef283 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -518,13 +518,8 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
- for (i = 0; i < 256; i++) {
- gma_crtc->lut_r[i] = i;
- gma_crtc->lut_g[i] = i;
- gma_crtc->lut_b[i] = i;
-
+ for (i = 0; i < 256; i++)
gma_crtc->lut_adj[i] = 0;
- }
gma_crtc->mode_dev = mode_dev;
gma_crtc->cursor_addr = 0;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 6a10215fc42d..e8e4ea14b12b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -172,7 +172,6 @@ struct gma_crtc {
int plane;
uint32_t cursor_addr;
struct gtt_range *cursor_gt;
- u8 lut_r[256], lut_g[256], lut_b[256];
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
/* a mode_set for fbdev users on this crtc */
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 771ff66711af..37c997e24b9e 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -26,7 +26,7 @@
#include "mdfld_output.h"
#include "mdfld_dsi_pkg_sender.h"
#include "tc35876x-dsi-lvds.h"
-#include <linux/i2c/tc35876x.h>
+#include <linux/platform_data/tc35876x.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/intel_scu_ipc.h>
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 59542bddc980..a956545774a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -150,7 +150,6 @@ static const u32 channel_formats1[] = {
static struct drm_plane_funcs hibmc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -181,6 +180,7 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
channel_formats1,
ARRAY_SIZE(channel_formats1),
+ NULL,
DRM_PLANE_TYPE_PRIMARY,
NULL);
if (ret) {
@@ -192,7 +192,8 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
-static void hibmc_crtc_enable(struct drm_crtc *crtc)
+static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
@@ -209,7 +210,8 @@ static void hibmc_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void hibmc_crtc_disable(struct drm_crtc *crtc)
+static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
@@ -453,11 +455,11 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
- .enable = hibmc_crtc_enable,
- .disable = hibmc_crtc_disable,
.mode_set_nofb = hibmc_crtc_mode_set_nofb,
.atomic_begin = hibmc_crtc_atomic_begin,
.atomic_flush = hibmc_crtc_atomic_flush,
+ .atomic_enable = hibmc_crtc_atomic_enable,
+ .atomic_disable = hibmc_crtc_atomic_disable,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 2ffdbf9801bd..d4f6f1f9df5b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -67,7 +67,6 @@ static struct drm_driver hibmc_driver = {
.gem_free_object_unlocked = hibmc_gem_free_object,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = hibmc_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.irq_handler = hibmc_drm_interrupt,
};
@@ -276,11 +275,12 @@ static int hibmc_unload(struct drm_device *dev)
hibmc_fbdev_fini(priv);
+ drm_atomic_helper_shutdown(dev);
+
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (priv->msi_enabled)
pci_disable_msi(dev->pdev);
- drm_vblank_cleanup(dev);
hibmc_kms_fini(priv);
hibmc_mm_fini(priv);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index f5ac80daeef2..b92595c477ef 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -131,7 +131,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "hibmcdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &hibmc_drm_fb_ops;
drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
@@ -158,7 +157,7 @@ out_unpin_bo:
out_unreserve_ttm_bo:
ttm_bo_unreserve(&bo->bo);
out_unref_gem:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -173,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
drm_fb_helper_fini(fbh);
if (gfb)
- drm_framebuffer_unreference(&gfb->fb);
+ drm_framebuffer_put(&gfb->fb);
}
static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 12a18557c5fd..ec4dd9df9150 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -47,7 +47,6 @@ static const struct drm_connector_helper_funcs
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ac457c779caa..3518167a7dc4 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret) {
DRM_ERROR("failed to unreference GEM object: %d\n", ret);
return ret;
@@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
bo = gem_to_hibmc_bo(obj);
*offset = hibmc_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
- drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_gem_object_put_unlocked(hibmc_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(hibmc_fb);
}
@@ -543,7 +543,7 @@ hibmc_user_framebuffer_create(struct drm_device *dev,
hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
if (IS_ERR(hibmc_fb)) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR((long)hibmc_fb);
}
return &hibmc_fb->fb;
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f77dcfaade6c..b4c7af3ab6ae 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -603,6 +603,72 @@ static void dsi_encoder_enable(struct drm_encoder *encoder)
dsi->enable = true;
}
+static enum drm_mode_status dsi_encoder_phy_mode_valid(
+ struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct mipi_phy_params phy;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ u32 req_kHz, act_kHz, lane_byte_clk_kHz;
+
+ /* Calculate the lane byte clk using the adjusted mode clk */
+ memset(&phy, 0, sizeof(phy));
+ req_kHz = mode->clock * bpp / dsi->lanes;
+ act_kHz = dsi_calc_phy_rate(req_kHz, &phy);
+ lane_byte_clk_kHz = act_kHz / 8;
+
+ DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, bpp,
+ drm_mode_vrefresh(mode), mode->clock);
+
+ /*
+ * Make sure the adjusted mode clock and the lane byte clk
+ * have a common denominator base frequency
+ */
+ if (mode->clock/dsi->lanes == lane_byte_clk_kHz/3) {
+ DRM_DEBUG_DRIVER("OK!\n");
+ return MODE_OK;
+ }
+
+ DRM_DEBUG_DRIVER("BAD!\n");
+ return MODE_BAD;
+}
+
+static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs = NULL;
+ struct drm_crtc *crtc = NULL;
+ struct drm_display_mode adj_mode;
+ enum drm_mode_status ret;
+
+ /*
+ * The crtc might adjust the mode, so go through the
+ * possible crtcs (technically just one) and call
+ * mode_fixup to figure out the adjusted mode before we
+ * validate it.
+ */
+ drm_for_each_crtc(crtc, encoder->dev) {
+ /*
+ * reset adj_mode to the mode value each time,
+ * so we don't adjust the mode twice
+ */
+ drm_mode_copy(&adj_mode, mode);
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs && crtc_funcs->mode_fixup)
+ if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode))
+ return MODE_BAD;
+
+ ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+ return MODE_OK;
+}
+
static void dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -622,6 +688,7 @@ static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.atomic_check = dsi_encoder_atomic_check,
+ .mode_valid = dsi_encoder_mode_valid,
.mode_set = dsi_encoder_mode_set,
.enable = dsi_encoder_enable,
.disable = dsi_encoder_disable
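
(Wiring .mode_valid into the encoder helper funcs lets the probe helpers reject unattainable modes before userspace ever lists them. A sketch of the contract, assuming a hypothetical MAX_PHY_CLOCK_KHZ limit:

	static enum drm_mode_status example_mode_valid(struct drm_encoder *encoder,
						       const struct drm_display_mode *mode)
	{
		if (mode->clock > MAX_PHY_CLOCK_KHZ)	/* hypothetical PHY limit */
			return MODE_CLOCK_HIGH;

		return MODE_OK;	/* anything else stays in the connector's mode list */
	}
)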
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index c96c228a9898..9823477b1855 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -178,6 +178,19 @@ static void ade_init(struct ade_hw_ctx *ctx)
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
}
+static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ adjusted_mode->clock =
+ clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
+ return true;
+}
+
+
static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -467,7 +480,8 @@ static void ade_dump_regs(void __iomem *base)
static void ade_dump_regs(void __iomem *base) { }
#endif
-static void ade_crtc_enable(struct drm_crtc *crtc)
+static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
@@ -489,7 +503,8 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
acrtc->enable = true;
}
-static void ade_crtc_disable(struct drm_crtc *crtc)
+static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
@@ -553,11 +568,12 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
- .enable = ade_crtc_enable,
- .disable = ade_crtc_disable,
+ .mode_fixup = ade_crtc_mode_fixup,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
+ .atomic_enable = ade_crtc_atomic_enable,
+ .atomic_disable = ade_crtc_atomic_disable,
};
static const struct drm_crtc_funcs ade_crtc_funcs = {
@@ -565,7 +581,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
- .set_property = drm_atomic_helper_crtc_set_property,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ade_crtc_enable_vblank,
@@ -583,8 +598,7 @@ static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
*/
port = of_get_child_by_name(dev->dev->of_node, "port");
if (!port) {
- DRM_ERROR("no port node found in %s\n",
- dev->dev->of_node->full_name);
+ DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
return -EINVAL;
}
of_node_put(port);
@@ -889,7 +903,6 @@ static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
static struct drm_plane_funcs ade_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -909,7 +922,7 @@ static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
return ret;
ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
- fmts, fmts_cnt, type, NULL);
+ fmts, fmts_cnt, NULL, type, NULL);
if (ret) {
DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 9c903672f582..e27352ca26c4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -34,14 +34,12 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
if (priv->fbdev) {
drm_fbdev_cma_fini(priv->fbdev);
priv->fbdev = NULL;
}
-#endif
+
drm_kms_helper_poll_fini(dev);
- drm_vblank_cleanup(dev);
dc_ops->cleanup(to_platform_device(dev->dev));
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
@@ -50,27 +48,16 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_DRM_FBDEV_EMULATION
static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
- if (priv->fbdev) {
- drm_fbdev_cma_hotplug_event(priv->fbdev);
- } else {
- priv->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(priv->fbdev))
- priv->fbdev = NULL;
- }
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
}
-#endif
static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
-#ifdef CONFIG_DRM_FBDEV_EMULATION
.output_poll_changed = kirin_fbdev_output_poll_changed,
-#endif
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -129,11 +116,18 @@ static int kirin_drm_kms_init(struct drm_device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
- /* force detection after connectors init */
- (void)drm_helper_hpd_irq_event(dev);
+ priv->fbdev = drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ DRM_ERROR("failed to initialize fbdev.\n");
+ ret = PTR_ERR(priv->fbdev);
+ goto err_cleanup_poll;
+ }
return 0;
+err_cleanup_poll:
+ drm_kms_helper_poll_fini(dev);
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_dc_cleanup:
@@ -163,8 +157,6 @@ static struct drm_driver kirin_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 7f60c64915d9..56cb62df065c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -20,9 +20,7 @@ struct kirin_dc_ops {
};
struct kirin_drm_private {
-#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_fbdev_cma *fbdev;
-#endif
};
extern const struct kirin_dc_ops ade_dc_ops;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 86f47e190309..54e3255dde13 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -712,7 +712,7 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
{
union hdmi_infoframe frame;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
@@ -969,14 +969,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
-}
-
static int tda998x_connector_fill_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
@@ -1014,7 +1006,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = tda998x_connector_fill_modes,
.detect = tda998x_connector_detect,
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 37fd0906f807..c69d5c487f51 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,6 @@ static struct drm_driver driver = {
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
- .set_busid = drm_pci_set_busid,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
.fops = &i810_driver_fops,
@@ -83,12 +82,12 @@ static int __init i810_init(void)
return -EINVAL;
}
driver.num_ioctls = i810_max_ioctl;
- return drm_pci_init(&driver, &i810_pci_driver);
+ return drm_legacy_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_pci_exit(&driver, &i810_pci_driver);
+ drm_legacy_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a5cd5dacf055..e9e64e8e9765 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -21,6 +21,7 @@ config DRM_I915
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
+ select CRC32
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 78c5c049a347..aed7d207ea84 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -25,6 +25,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_MM_SELFTEST
+ select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f8227318dcaf..892f52b53060 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -39,6 +39,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
+ i915_gem_object.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 325618d969fe..ca3d1925beda 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -285,8 +285,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;
no_enough_resource:
- gvt_vgpu_err("fail to allocate resource %s\n", item);
- gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+ gvt_err("fail to allocate resource %s\n", item);
+ gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..21c36e256884 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1382,13 +1382,13 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
ret = -EINVAL;
goto err;
}
- } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
- (!vgpu_gmadr_is_valid(s->vgpu,
- guest_gma + op_size - 1))) {
+ } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
ret = -EINVAL;
goto err;
}
+
return 0;
+
err:
gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
int ret;
struct intel_vgpu *vgpu = workload->vgpu;
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unmap_src:
i915_gem_object_unpin_map(obj);
put_obj:
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index bed33514103c..286703643002 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 7cb0818a13de..3c318439a659 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -178,9 +178,9 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
- SKL_FUSE_PG0_DIST_STATUS |
- SKL_FUSE_PG1_DIST_STATUS |
- SKL_FUSE_PG2_DIST_STATUS;
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
vgpu_vreg(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 1648887d3f55..91b4300f3b39 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -622,6 +622,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
@@ -685,6 +686,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
+ workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -718,6 +720,17 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
return ret;
}
+ /* Only scan and shadow the first workload in the queue
+ * as there is only one pre-allocated buf-obj for shadow.
+ */
+ if (list_empty(workload_q_head(vgpu, ring_id))) {
+ intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_gvt_scan_and_shadow_workload(workload);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ }
+
queue_workload(workload);
return 0;
}
@@ -800,6 +813,8 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
list_del_init(&pos->list);
free_workload(pos);
}
+
+ clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6166e34d892b..e6dfc3331f4b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
writeq(pte, addr);
}
-static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+static inline int gtt_get_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
- return e;
+ return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
- WARN_ON(ret);
+ if (WARN_ON(ret))
+ return ret;
} else if (!pt) {
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
} else {
e->val64 = *((u64 *)pt + index);
}
- return e;
+ return 0;
}
-static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+static inline int gtt_set_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
- return e;
+ return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
- WARN_ON(ret);
+ if (WARN_ON(ret))
+ return ret;
} else if (!pt) {
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
- return e;
+ return 0;
}
#define GTT_HAW 46
@@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
/*
* MM helpers.
*/
-struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
struct intel_gvt *gvt = mm->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ int ret;
e->type = mm->page_table_entry_type;
- ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ if (ret)
+ return ret;
+
ops->test_pse(e);
- return e;
+ return 0;
}
-struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
@@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
/*
* PPGTT shadow page table helpers.
*/
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+static inline int ppgtt_spt_get_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
{
struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ int ret;
e->type = get_entry_type(type);
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
- return e;
+ return -EINVAL;
- ops->get_entry(page_table, e, index, guest,
+ ret = ops->get_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->vgpu);
+ if (ret)
+ return ret;
+
ops->test_pse(e);
- return e;
+ return 0;
}
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+static inline int ppgtt_spt_set_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
- return e;
+ return -EINVAL;
return ops->set_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
@@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
- if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
- ppgtt_get_guest_entry(spt, e, i)))
+ if (!ppgtt_get_guest_entry(spt, e, i) && \
+ spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
- if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
- ppgtt_get_shadow_entry(spt, e, i)))
+ if (!ppgtt_get_shadow_entry(spt, e, i) && \
+ spt->vgpu->gvt->gtt.pte_ops->test_present(e))
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
@@ -979,29 +989,26 @@ fail:
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
- unsigned long index)
+ struct intel_gvt_gtt_entry *se, unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry e;
int ret;
- ppgtt_get_shadow_entry(spt, &e, index);
-
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
index);
- if (!ops->test_present(&e))
+ if (!ops->test_present(se))
return 0;
- if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
return 0;
- if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ if (gtt_type_is_pt(get_next_pt_type(se->type))) {
struct intel_vgpu_ppgtt_spt *s =
- ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
if (!s) {
gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
@@ -1011,12 +1018,10 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
if (ret)
goto fail;
}
- ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
- ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
- spt, e.val64, e.type);
+ spt, se->val64, se->type);
return ret;
}
@@ -1236,22 +1241,37 @@ static int ppgtt_handle_guest_write_page_table(
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
+ int type = spt->shadow_page.type;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry se;
int ret;
int new_present;
new_present = ops->test_present(we);
- ret = ppgtt_handle_guest_entry_removal(gpt, index);
- if (ret)
- goto fail;
+	/*
+	 * Add the new entry first and then remove the old one. This
+	 * guarantees that the ppgtt table stays valid during the
+	 * window between addition and removal.
+	 */
+ ppgtt_get_shadow_entry(spt, &se, index);
if (new_present) {
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
if (ret)
goto fail;
}
+
+ ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ if (ret)
+ goto fail;
+
+ if (!new_present) {
+ ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &se, index);
+ }
+
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
@@ -1323,7 +1343,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- struct intel_gvt_gtt_entry we;
+ struct intel_gvt_gtt_entry we, se;
unsigned long index;
int ret;
@@ -1339,7 +1359,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
return ret;
} else {
if (!test_bit(index, spt->post_shadow_bitmap)) {
- ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ ppgtt_get_shadow_entry(spt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
if (ret)
return ret;
}
@@ -1713,8 +1734,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
if (!vgpu_gmadr_is_valid(vgpu, gma))
goto err;
- ggtt_get_guest_entry(mm, &e,
- gma_ops->gma_to_ggtt_pte_index(gma));
+ ret = ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ if (ret)
+ goto err;
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
@@ -1724,7 +1747,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
switch (mm->page_table_level) {
case 4:
- ppgtt_get_shadow_root_entry(mm, &e, 0);
+ ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
gma_index[2] = gma_ops->gma_to_pde_index(gma);
@@ -1732,15 +1757,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
index = 4;
break;
case 3:
- ppgtt_get_shadow_root_entry(mm, &e,
+ ret = ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_l3_pdp_index(gma));
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pde_index(gma);
gma_index[1] = gma_ops->gma_to_pte_index(gma);
index = 2;
break;
case 2:
- ppgtt_get_shadow_root_entry(mm, &e,
+ ret = ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_pde_index(gma));
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pte_index(gma);
index = 1;
break;
@@ -1755,6 +1784,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
(i == index - 1));
if (ret)
goto err;
+
+ if (!pte_ops->test_present(&e)) {
+ gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+ goto err;
+ }
}
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
@@ -2329,13 +2363,12 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_gtt - reset the all GTT related status
* @vgpu: a vGPU
- * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
*
* This function is called from vfio core to reset all
* GTT related status, including GGTT, PPGTT, scratch page.
*
*/
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
int i;
@@ -2347,9 +2380,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
*/
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
- if (!dmlr)
- return;
-
intel_vgpu_reset_ggtt(vgpu);
/* clear scratch page for security */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index f88eb5e89bea..30a4c8d16026 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
};
struct intel_gvt_gtt_pte_ops {
- struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
- struct intel_gvt_gtt_entry *e,
- unsigned long index, bool hypervisor_access, unsigned long gpa,
- struct intel_vgpu *vgpu);
- struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
- struct intel_gvt_gtt_entry *e,
- unsigned long index, bool hypervisor_access, unsigned long gpa,
- struct intel_vgpu *vgpu);
+ int (*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index,
+ bool hypervisor_access,
+ unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ int (*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index,
+ bool hypervisor_access,
+ unsigned long gpa,
+ struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
@@ -143,12 +147,12 @@ struct intel_vgpu_mm {
struct intel_vgpu *vgpu;
};
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+extern int intel_vgpu_mm_get_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+extern int intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
@@ -208,7 +212,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
-extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
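
(The net effect of this header change: get_entry/set_entry now report success with 0 or a negative errno instead of echoing the entry pointer, so hypervisor read/write failures can finally propagate. The caller pattern the rest of the patch converts to, sketched:

	struct intel_gvt_gtt_entry e;
	int ret;

	ret = ops->get_entry(pt, &e, index, false, 0, vgpu);
	if (ret)
		return ret;	/* propagate instead of using a stale entry */
)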
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2964a4d01a66..44b719eda8c4 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -167,6 +167,7 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
+ DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -482,6 +483,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index feed9921b3b3..2294466dd415 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -113,9 +113,17 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->offset = i;
p = find_mmio_info(gvt, info->offset);
- if (p)
- gvt_err("dup mmio definition offset %x\n",
+ if (p) {
+ WARN(1, "dup mmio definition offset %x\n",
info->offset);
+ kfree(info);
+
+ /* Return -EEXIST here to make the GVT-g load fail,
+ * so that duplicated MMIO definitions are caught as
+ * early as possible.
+ */
+ return -EEXIST;
+ }
info->ro_mask = ro_mask;
info->device = device;
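The registration path now fails fast: a duplicate offset triggers a WARN and
propagates -EEXIST, so the module load aborts instead of silently keeping the
first definition. The same idiom reduced to a self-contained toy registry
(illustrative only, not driver code):

#include <errno.h>
#include <stdio.h>

#define MAX_ENTRIES 8
static unsigned int keys[MAX_ENTRIES];
static int nr_keys;

static int registry_add(unsigned int key)
{
        int i;

        for (i = 0; i < nr_keys; i++) {
                if (keys[i] == key) {
                        fprintf(stderr, "dup definition %x\n", key);
                        return -EEXIST; /* caller aborts init early */
                }
        }
        if (nr_keys == MAX_ENTRIES)
                return -ENOSPC;
        keys[nr_keys++] = key;
        return 0;
}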
@@ -1222,10 +1230,12 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
{
write_vreg(vgpu, offset, p_data, bytes);
- if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
- vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+ vgpu_vreg(vgpu, offset) |=
+ HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
else
- vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ vgpu_vreg(vgpu, offset) &=
+ ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
return 0;
}
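The write handler above follows a simple emulation rule: a guest write that
sets the power-well REQUEST bit is acknowledged immediately by mirroring it
into the STATE bit of the virtual register, so a guest polling for the state
change makes progress. The rule in isolation (self-contained sketch; the bit
parameters are placeholders):

static unsigned int emulate_pwr_well_write(unsigned int vreg,
                                           unsigned int req_bit,
                                           unsigned int state_bit)
{
        if (vreg & req_bit)
                vreg |= state_bit;      /* request -> report enabled */
        else
                vreg &= ~state_bit;     /* no request -> report disabled */
        return vreg;
}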
@@ -2242,10 +2252,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
- MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
+ /*
+ * Use an arbitrary power well controlled by the PWR_WELL_CTL
+ * register.
+ */
+ MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
@@ -2581,7 +2598,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
NULL, force_nonpriv_write);
- MMIO_D(0x22040, D_BDW_PLUS);
MMIO_D(0x44484, D_BDW_PLUS);
MMIO_D(0x4448c, D_BDW_PLUS);
@@ -2636,10 +2652,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
- skl_power_well_ctl_write);
- MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
+ /*
+ * Use an arbitrary power well controlled by the PWR_WELL_CTL
+ * register.
+ */
+ MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
+ skl_power_well_ctl_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2831,7 +2850,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x320f0, D_SKL | D_KBL);
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL_PLUS);
MMIO_D(0x71034, D_SKL_PLUS);
MMIO_D(0x72034, D_SKL_PLUS);
@@ -2849,10 +2867,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_D(0x4ab8, D_KBL);
- MMIO_D(0x940c, D_SKL_PLUS);
MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
- MMIO_D(0x4ab0, D_SKL | D_KBL);
- MMIO_D(0x20d4, D_SKL | D_KBL);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index fd0c85f9ef3c..83e88c70272a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1170,10 +1170,27 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
+static ssize_t
+hw_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+
+ if (mdev) {
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)
+ mdev_get_drvdata(mdev);
+ return sprintf(buf, "%u\n",
+ vgpu->shadow_ctx->hw_id);
+ }
+ return sprintf(buf, "\n");
+}
+
static DEVICE_ATTR_RO(vgpu_id);
+static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
+ &dev_attr_hw_id.attr,
NULL
};
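hw_id joins vgpu_id as a read-only sysfs attribute. The shape is the standard
DEVICE_ATTR_RO one: a <name>_show callback plus the macro, which generates the
dev_attr_<name> symbol that goes into the attribute array. A generic sketch
with a made-up attribute name ("example" is not part of the patch):

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", 42);        /* one value plus newline */
}
static DEVICE_ATTR_RO(example);                 /* emits dev_attr_example */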
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 504e57c3bc23..2ea542257f03 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -207,18 +207,16 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
- POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ(l3_offset);
- I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
- POSTING_READ(l3_offset);
+ gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
l3_offset.reg += 4;
}
}
@@ -242,18 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ(offset);
- I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
- POSTING_READ(offset);
+ vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+ I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
- I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
- POSTING_READ(l3_offset);
+ vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
l3_offset.reg += 4;
}
}
@@ -272,6 +268,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ i915_reg_t last_reg = _MMIO(0);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
@@ -287,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- mmio->value = I915_READ(mmio->reg);
+ mmio->value = I915_READ_FW(mmio->reg);
/*
* if it is an inhibit context, load in_context mmio
@@ -304,13 +301,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
else
v = vgpu_vreg(vgpu, mmio->reg);
- I915_WRITE(mmio->reg, v);
- POSTING_READ(mmio->reg);
+ I915_WRITE_FW(mmio->reg, v);
+ last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "load",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
+
+ /* Make sure the switched MMIOs have taken effect. */
+ if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+ I915_READ_FW(last_reg);
+
handle_tlb_pending_event(vgpu, ring_id);
}
@@ -319,6 +321,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
+ i915_reg_t last_reg = _MMIO(0);
u32 v;
int i, array_size;
@@ -335,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
if (mmio->mask) {
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
@@ -346,13 +349,17 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->in_context)
continue;
- I915_WRITE(mmio->reg, v);
- POSTING_READ(mmio->reg);
+ I915_WRITE_FW(mmio->reg, v);
+ last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "restore",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
+
+ /* Make sure the switched MMIOs have taken effect. */
+ if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+ I915_READ_FW(last_reg);
}
/**
@@ -367,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!pre && !next))
return;
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+ /**
+ * We use the raw MMIO access wrappers to improve the
+ * performance of batched MMIO read/write, so we need to
+ * handle forcewake manually.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
/**
* TODO: Optimize for vGPU to vGPU switch by merging
* switch_mmio_to_host() and switch_mmio_to_vgpu().
@@ -382,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
if (next)
switch_mmio_to_vgpu(next, ring_id);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
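The pattern throughout this file is: take forcewake once, run the whole batch
with the raw *_FW accessors (which skip the per-access forcewake handling),
then release it. A hedged sketch of that bracket, assuming the i915_reg_t
layout used above (batch_mmio_save is a hypothetical helper):

static void batch_mmio_save(struct drm_i915_private *dev_priv,
                            i915_reg_t reg, u32 *buf, int count)
{
        int i;

        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        for (i = 0; i < count; i++) {
                buf[i] = I915_READ_FW(reg);     /* no forcewake per read */
                reg.reg += 4;
        }
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}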
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 22e08eb2d0b7..391800d2067b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc = 0;
+
+ desc = ce->lrc_desc;
+
+ /* Update bits 0-11 of the context descriptor, which include flags
+ * such as GEN8_CTX_* cached in desc_template.
+ */
+ desc &= U64_MAX << 12;
+ desc |= ctx->desc_template & ((1ULL << 12) - 1);
+
+ ce->lrc_desc = desc;
+}
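The splice above keeps the cached upper bits of the descriptor and refreshes
only the low 12 bits (the GEN8_CTX_* flags from desc_template). The mask
arithmetic in isolation, as a standalone sketch:

#include <stdint.h>

static inline uint64_t splice_low12(uint64_t desc, uint64_t tmpl)
{
        desc &= ~0ULL << 12;                    /* clear bits 11:0 */
        desc |= tmpl & ((1ULL << 12) - 1);      /* take flags from tmpl */
        return desc;
}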
+
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
+ * shadowing it, including the ring buffer, wa_ctx and ctx.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload is submitted to i915, to make
+ * sure its contents are valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (workload->shadowed)
+ return 0;
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return ret;
- }
+ if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(shadow_ctx,
+ dev_priv->engine[ring_id]);
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->req = i915_gem_request_get(rq);
- ret = intel_gvt_scan_and_shadow_workload(workload);
+ ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto out;
@@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
+ workload->shadowed = true;
+
+out:
+ return ret;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
+ int ret = 0;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto out;
}
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ /* Pin the shadow context from GVT even though it will also be pinned
+ * when i915 allocates the request. GVT updates the guest context from
+ * the shadow context when the workload completes, and by that moment
+ * i915 may already have unpinned the shadow context, making the
+ * shadow_ctx pages invalid. So GVT needs to pin it itself; after
+ * updating the guest context, GVT can unpin the shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto out;
+ }
- ret = 0;
- workload->dispatched = true;
out:
if (ret)
workload->status = ret;
- if (!IS_ERR_OR_NULL(rq))
- i915_add_request(rq);
- else
- engine->context_unpin(engine, shadow_ctx);
+ if (!IS_ERR_OR_NULL(workload->req)) {
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+ i915_add_request(workload->req);
+ workload->dispatched = true;
+ }
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
@@ -617,7 +664,7 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- i915_gem_context_put_unlocked(vgpu->shadow_ctx);
+ i915_gem_context_put(vgpu->shadow_ctx);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
@@ -631,5 +678,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
+ bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 9b6bf51e9b9b..0d431a968a32 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
struct drm_i915_gem_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
+ bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3deadcbd5a24..02c61a1ad56a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -43,6 +43,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@@ -504,11 +505,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
- intel_vgpu_reset_gtt(vgpu, dmlr);
-
/*fence will not be reset during virtual reset */
- if (dmlr)
+ if (dmlr) {
+ intel_vgpu_reset_gtt(vgpu);
intel_vgpu_reset_resource(vgpu);
+ }
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f0cb22cc0dd6..8ba932b22f7c 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1073,7 +1073,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
goto unpin_src;
}
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
if (IS_ERR(dst))
goto unpin_dst;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 00d8967c8512..e4d4b6b41e26 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/sort.h>
+#include <linux/sched/mm.h>
#include "intel_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -543,75 +544,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_gem_pageflip_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- for_each_intel_crtc(dev, crtc) {
- const char pipe = pipe_name(crtc->pipe);
- const char plane = plane_name(crtc->plane);
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = crtc->flip_work;
- if (work == NULL) {
- seq_printf(m, "No flip due on pipe %c (plane %c)\n",
- pipe, plane);
- } else {
- u32 pending;
- u32 addr;
-
- pending = atomic_read(&work->pending);
- if (pending) {
- seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
- pipe, plane);
- } else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
- pipe, plane);
- }
- if (work->flip_queued_req) {
- struct intel_engine_cs *engine = work->flip_queued_req->engine;
-
- seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
- engine->name,
- work->flip_queued_req->global_seqno,
- intel_engine_last_submit(engine),
- intel_engine_get_seqno(engine),
- i915_gem_request_completed(work->flip_queued_req));
- } else
- seq_printf(m, "Flip not associated with any ring\n");
- seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
- work->flip_queued_vblank,
- work->flip_ready_vblank,
- intel_crtc_get_vblank_counter(crtc));
- seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
-
- if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
- else
- addr = I915_READ(DSPADDR(crtc->plane));
- seq_printf(m, "Current scanout address 0x%08x\n", addr);
-
- if (work->pending_flip_obj) {
- seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
- seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
- }
- }
- spin_unlock_irq(&dev->event_lock);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1159,7 +1091,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1181,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
@@ -1210,7 +1142,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
dev_priv->rps.pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1241,18 +1173,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1407,6 +1342,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_reset_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
+
+ for_each_engine(engine, dev_priv, id) {
+ seq_printf(m, "%s = %u\n", engine->name,
+ i915_reset_engine_count(error, engine));
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1838,7 +1790,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1858,7 +1810,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ?
+ (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -1914,7 +1867,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev) {
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -1970,7 +1923,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
seq_printf(m, "HW context %u ", ctx->hw_id);
if (ctx->pid) {
struct task_struct *task;
@@ -2002,12 +1955,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- seq_printf(m,
- "\tvma hashtable size=%u (actual %lu), count=%u\n",
- ctx->vma_lut.ht_size,
- BIT(ctx->vma_lut.ht_bits),
- ctx->vma_lut.ht_count);
-
seq_putc(m, '\n');
}
@@ -2076,7 +2023,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
@@ -2310,6 +2257,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&dev_priv->rps.num_waiters));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2323,22 +2272,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
mutex_lock(&dev->filelist_mutex);
- spin_lock(&dev_priv->rps.client_lock);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts%s\n",
+ seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
- file_priv->rps.boosts,
- list_empty(&file_priv->rps.link) ? "" : ", active");
+ atomic_read(&file_priv->rps.boosts));
rcu_read_unlock();
}
- seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
- spin_unlock(&dev_priv->rps.client_lock);
+ seq_printf(m, "Kernel (anonymous) boosts: %d\n",
+ atomic_read(&dev_priv->rps.boosts));
mutex_unlock(&dev->filelist_mutex);
if (INTEL_GEN(dev_priv) >= 6 &&
@@ -2831,7 +2778,7 @@ out:
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u64 power;
+ unsigned long long power;
u32 units;
if (INTEL_GEN(dev_priv) < 6)
@@ -2839,15 +2786,18 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
- rdmsrl(MSR_RAPL_POWER_UNIT, power);
- power = (power & 0x1f00) >> 8;
- units = 1000000 / (1 << power); /* convert to uJ */
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
+ intel_runtime_pm_put(dev_priv);
+ return -ENODEV;
+ }
+
+ units = (power & 0x1f00) >> 8;
power = I915_READ(MCH_SECP_NRG_STTS);
- power *= units;
+ power = (1000000 * power) >> units; /* convert to uJ */
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "%llu", (long long unsigned)power);
+ seq_printf(m, "%llu", power);
return 0;
}
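The reworked conversion reads the RAPL energy-status-unit field (bits 12:8 of
MSR_RAPL_POWER_UNIT) and scales the raw counter, where each count is 1/2^units
joules. A worked example with a typical unit value (14, i.e. one count is
roughly 61 uJ); standalone sketch, not driver code:

#include <stdio.h>

int main(void)
{
        unsigned long long raw = 32768; /* counts of 1/2^units J */
        unsigned int units = 14;        /* common RAPL ESU value */

        /* (1000000 * 32768) >> 14 == 2000000 uJ == 2 J */
        printf("%llu uJ\n", (1000000ULL * raw) >> units);
        return 0;
}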
@@ -3289,6 +3239,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -3312,6 +3263,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
+ seq_printf(m, "\tReset count: %d\n",
+ i915_reset_engine_count(error, engine));
rcu_read_lock();
@@ -3370,8 +3323,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- seq_printf(m, "\tExeclist CSB read %d, write %d\n",
- read, write);
+ seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
+ read, write,
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
if (read >= GEN8_CSB_ENTRIES)
read = 0;
if (write >= GEN8_CSB_ENTRIES)
@@ -3758,13 +3713,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3796,13 +3756,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3842,13 +3807,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3895,13 +3865,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -4331,16 +4306,16 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&dev->struct_mutex);
}
- lockdep_set_current_reclaim_state(GFP_KERNEL);
+ fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
+ i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
+ i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(dev_priv);
- lockdep_clear_current_reclaim_state();
+ fs_reclaim_release(GFP_KERNEL);
if (val & DROP_FREED) {
synchronize_rcu();
@@ -4580,7 +4555,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@@ -4810,7 +4785,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
@@ -4824,6 +4798,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
+ {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fc307e03943c..9f45cfeae775 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -132,9 +132,13 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
- DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+ DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else
+ dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
@@ -173,29 +177,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
- unsigned short id_ext = pch->device &
- INTEL_PCH_DEVICE_ID_MASK_EXT;
+
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -203,51 +203,60 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
WARN_ON(IS_HSW_ULT(dev_priv) ||
IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
!IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) &&
!IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_KBP;
- DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found CannonPoint PCH\n");
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
- (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
- ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
pch->subsystem_vendor ==
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_id = id;
dev_priv->pch_type =
intel_virt_detect_pch(dev_priv);
} else
@@ -331,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ if (value && intel_has_reset_engine(dev_priv))
+ value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev_priv);
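With this change I915_PARAM_HAS_GPU_RESET becomes tri-state: 0 (no reset
support), 1 (full-chip reset only), 2 (per-engine reset also available). A
sketch of how userspace might query it (assumes libdrm and the i915 uapi
header; error handling trimmed):

#include <xf86drm.h>
#include <i915_drm.h>

static int query_gpu_reset_level(int drm_fd)
{
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_GPU_RESET,
                .value = &value,
        };

        if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return -1;
        return value;   /* 0, 1 or 2 as described above */
}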
@@ -377,6 +388,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_FENCE:
case I915_PARAM_HAS_EXEC_CAPTURE:
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
+ case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -585,16 +597,19 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
- i915_gem_context_fini(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->context_list));
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -862,7 +877,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
- spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@@ -1227,6 +1241,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ intel_fbdev_unregister(dev_priv);
intel_audio_deinit(dev_priv);
intel_gpu_ips_teardown();
@@ -1319,7 +1334,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
- goto out_cleanup_vblank;
+ goto out_cleanup_hw;
i915_driver_register(dev_priv);
@@ -1336,8 +1351,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_cleanup_vblank:
- drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
@@ -1360,7 +1373,7 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- intel_fbdev_fini(dev);
+ i915_driver_unregister(dev_priv);
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
@@ -1371,10 +1384,6 @@ void i915_driver_unload(struct drm_device *dev)
intel_gvt_cleanup(dev_priv);
- i915_driver_unregister(dev_priv);
-
- drm_vblank_cleanup(dev);
-
intel_modeset_cleanup(dev);
/*
@@ -1400,9 +1409,6 @@ void i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_reset_error_state(dev_priv);
- /* Flush any outstanding unpin_work. */
- drain_workqueue(dev_priv->wq);
-
i915_gem_fini(dev_priv);
intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
@@ -1427,9 +1433,10 @@ static void i915_driver_release(struct drm_device *dev)
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
int ret;
- ret = i915_gem_open(dev, file);
+ ret = i915_gem_open(i915, file);
if (ret)
return ret;
@@ -1459,7 +1466,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file);
+ i915_gem_context_close(file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
@@ -1825,7 +1832,8 @@ static int i915_resume_switcheroo(struct drm_device *dev)
/**
* i915_reset - reset chip after a hang
- * @dev_priv: device private to reset
+ * @i915: #drm_i915_private to reset
+ * @flags: options controlling the reset (e.g. I915_RESET_QUIET)
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1840,33 +1848,34 @@ static int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *i915, unsigned int flags)
{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct i915_gpu_error *error = &i915->gpu_error;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
return;
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(dev_priv))
+ if (!i915_gem_unset_wedged(i915))
goto wakeup;
+ if (!(flags & I915_RESET_QUIET))
+ dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
error->reset_count++;
- pr_notice("drm/i915: Resetting chip after gpu hang\n");
- disable_irq(dev_priv->drm.irq);
- ret = i915_gem_reset_prepare(dev_priv);
+ disable_irq(i915->drm.irq);
+ ret = i915_gem_reset_prepare(i915);
if (ret) {
DRM_ERROR("GPU recovery failed\n");
- intel_gpu_reset(dev_priv, ALL_ENGINES);
+ intel_gpu_reset(i915, ALL_ENGINES);
goto error;
}
- ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ ret = intel_gpu_reset(i915, ALL_ENGINES);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1875,16 +1884,22 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset(dev_priv);
- intel_overlay_reset(dev_priv);
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
/* Ok, now get things going again... */
/*
* Everything depends on having the GTT running, so we need to start
- * there. Fortunately we don't need to do this unless we reset the
- * chip at a PCI level.
- *
+ * there.
+ */
+ ret = i915_ggtt_enable_hw(i915);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+ goto error;
+ }
+
+ /*
* Next we need to restore the context, but we don't use those
* yet either...
*
@@ -1892,17 +1907,17 @@ void i915_reset(struct drm_i915_private *dev_priv)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(dev_priv);
+ ret = i915_gem_init_hw(i915);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
goto error;
}
- i915_queue_hangcheck(dev_priv);
+ i915_queue_hangcheck(i915);
finish:
- i915_gem_reset_finish(dev_priv);
- enable_irq(dev_priv->drm.irq);
+ i915_gem_reset_finish(i915);
+ enable_irq(i915->drm.irq);
wakeup:
clear_bit(I915_RESET_HANDOFF, &error->flags);
@@ -1910,10 +1925,74 @@ wakeup:
return;
error:
- i915_gem_set_wedged(dev_priv);
+ i915_gem_set_wedged(i915);
+ i915_gem_retire_requests(i915);
goto finish;
}
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @flags: options
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset the engine (which forces the engine to idle)
+ * - re-init/configure the engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
+{
+ struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct drm_i915_gem_request *active_request;
+ int ret;
+
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+ if (!(flags & I915_RESET_QUIET)) {
+ dev_notice(engine->i915->drm.dev,
+ "Resetting %s after gpu hang\n", engine->name);
+ }
+ error->reset_engine_count[engine->id]++;
+
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR(active_request)) {
+ DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
+ ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+ if (ret) {
+ /* If we fail here, we expect to fall back to a global reset */
+ DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on the ELSP; we know the
+ * active request, so we can drop it and adjust the ring head to skip
+ * the offending request, resuming execution of the remaining requests
+ * in the queue.
+ */
+ i915_gem_reset_engine(engine, active_request);
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = engine->init_hw(engine);
+ if (ret)
+ goto out;
+
+out:
+ i915_gem_reset_finish_engine(engine);
+ return ret;
+}
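The intended recovery flow is engine-first with escalation: try the cheap
per-engine reset, and only fall back to the full-chip i915_reset() if it
fails. A hedged sketch of a caller (the I915_RESET_ENGINE/BACKOFF flag
choreography and locking that the real hangcheck path performs are elided):

static void example_recover(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;

        /* flag setup and locking elided for brevity */
        if (intel_has_reset_engine(i915) &&
            i915_reset_engine(engine, I915_RESET_QUIET) == 0)
                return;         /* engine recovered, device untouched */

        i915_reset(i915, I915_RESET_QUIET);     /* escalate to full reset */
}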
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2657,6 +2736,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
@@ -2665,12 +2746,11 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
- .set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
@@ -2683,7 +2763,6 @@ static struct drm_driver driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e1f7c97a338a..18d9da53282b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170619"
-#define DRIVER_TIMESTAMP 1497857498
+#define DRIVER_DATE "20170818"
+#define DRIVER_TIMESTAMP 1503088845
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
return false;
}
-static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
@@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
return fp;
}
-static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
-static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
-static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
uint_fixed_16_16_t min2)
{
uint_fixed_16_16_t min;
@@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
return min;
}
-static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
uint_fixed_16_16_t max2)
{
uint_fixed_16_16_t max;
@@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
return max;
}
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+{
+ uint_fixed_16_16_t fp;
+ WARN_ON(val >> 32);
+ fp.val = clamp_t(uint32_t, val, 0, ~0);
+ return fp;
+}
+
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t d)
{
@@ -170,48 +178,30 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint32_t result;
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val >> 32);
- result = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return result;
+ return clamp_t(uint32_t, intermediate_val, 0, ~0);
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val.val * mul.val;
intermediate_val = intermediate_val >> 16;
- WARN_ON(intermediate_val >> 32);
- fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
}
-static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
- uint_fixed_16_16_t fp, res;
-
- fp = u32_to_fixed_16_16(val);
- res.val = DIV_ROUND_UP(fp.val, d);
- return res;
-}
-
-static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
-{
- uint_fixed_16_16_t res;
uint64_t interm_val;
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- WARN_ON(interm_val >> 32);
- res.val = (uint32_t) interm_val;
-
- return res;
+ return clamp_u64_to_fixed16(interm_val);
}
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
@@ -225,16 +215,32 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
return clamp_t(uint32_t, interm_val, 0, ~0);
}
-static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val * mul.val;
- WARN_ON(intermediate_val >> 32);
- fp.val = (uint32_t) intermediate_val;
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ uint64_t interm_sum;
+
+ interm_sum = (uint64_t) add1.val + add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ uint32_t add2)
+{
+ uint64_t interm_sum;
+ uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
+
+ interm_sum = (uint64_t) add1.val + interm_add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
}
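All of these helpers share the same 16.16 discipline: widen to 64 bits, do the
arithmetic, then saturate back with clamp_u64_to_fixed16(). A standalone
illustration (userspace sketch mirroring the helper above, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_u64_to_fixed16(uint64_t v)        /* mirrors above */
{
        return v > UINT32_MAX ? UINT32_MAX : (uint32_t)v;
}

int main(void)
{
        uint32_t a = 3 << 16, b = 5 << 16;      /* 3.0 and 5.0 in 16.16 */
        uint64_t sum = (uint64_t)a + b;         /* widen before adding */

        printf("3.0 + 5.0 = %u.0\n", clamp_u64_to_fixed16(sum) >> 16);
        printf("saturates to 0x%x\n",
               clamp_u64_to_fixed16((uint64_t)UINT32_MAX + (1 << 16)));
        return 0;
}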
static inline const char *yesno(bool v)
@@ -584,8 +590,7 @@ struct drm_i915_file_private {
struct idr context_idr;
struct intel_rps_client {
- struct list_head link;
- unsigned boosts;
+ atomic_t boosts;
} rps;
unsigned int bsd_engine;
@@ -597,7 +602,7 @@ struct drm_i915_file_private {
* to limit the badly behaving clients access to gpu.
*/
#define I915_MAX_CLIENT_CONTEXT_BANS 3
- int context_bans;
+ atomic_t context_bans;
};
/* Used by dp and fdi links */
@@ -641,6 +646,7 @@ struct intel_opregion {
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
void *rvda;
+ void *vbt_firmware;
const void *vbt;
u32 vbt_size;
u32 *lid_state;
@@ -710,11 +716,6 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags);
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
@@ -753,6 +754,7 @@ struct intel_csr {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_reset_engine); \
func(has_fbc); \
func(has_fpga_dbg); \
func(has_full_ppgtt); \
@@ -917,6 +919,7 @@ struct i915_gpu_state {
enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
+ u32 reset_count;
/* position of active request inside the ring */
u32 rq_head, rq_post, rq_tail;
@@ -1056,6 +1059,11 @@ struct intel_fbc {
bool underrun_detected;
struct work_struct underrun_work;
+ /*
+ * Due to the atomic rules we can't access some structures without the
+ * appropriate locking, so we cache information here in order to avoid
+ * these problems.
+ */
struct intel_fbc_state_cache {
struct i915_vma *vma;
@@ -1077,6 +1085,13 @@ struct intel_fbc {
} fb;
} state_cache;
+ /*
+ * This structure contains everything that's relevant to program the
+ * hardware registers. When we want to figure out if we need to disable
+ * and re-enable FBC for a new configuration we just check if there's
+ * something different in the struct. The genx_fbc_activate functions
+ * are supposed to read from it in order to program the registers.
+ */
struct intel_fbc_reg_params {
struct i915_vma *vma;
@@ -1149,11 +1164,11 @@ struct i915_psr {
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint PCH */
- PCH_LPT, /* Lynxpoint PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
- PCH_KBP, /* Kabypoint PCH */
- PCH_CNP, /* Cannonpoint PCH */
+ PCH_KBP, /* Kaby Lake PCH */
+ PCH_CNP, /* Cannon Lake PCH */
PCH_NOP,
};
@@ -1166,6 +1181,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+#define QUIRK_INCREASE_T12_DELAY (1<<6)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1301,13 +1317,10 @@ struct intel_gen6_power_mgmt {
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
- spinlock_t client_lock;
- struct list_head clients;
- bool client_boost;
-
bool enabled;
struct delayed_work autoenable_work;
- unsigned boosts;
+ atomic_t num_waiters;
+ atomic_t boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
@@ -1383,12 +1396,23 @@ struct i915_power_well {
bool hw_enabled;
u64 domains;
/* unique identifier for this power well */
- unsigned long id;
+ enum i915_power_well_id id;
/*
* Arbitrary data associated with this power well. Platform and power
* well specific.
*/
- unsigned long data;
+ union {
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ } hsw;
+ };
const struct i915_power_well_ops *ops;
};
@@ -1505,6 +1529,8 @@ struct i915_gpu_error {
/* Protected by the above dev->gpu_error.lock. */
struct i915_gpu_state *first_error;
+ atomic_t pending_fb_pin;
+
unsigned long missed_irq_rings;
/**
@@ -1550,6 +1576,12 @@ struct i915_gpu_error {
* inspect the bit and do the reset directly, otherwise the worker
* waits for the struct_mutex.
*
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts on the same engine.
+ * As the number of engines continues to grow, we allocate these flags
+ * from the most significant bits.
+ *
+ *
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_gem_request_alloc(), this bit is checked and the sequence
@@ -1558,7 +1590,12 @@ struct i915_gpu_error {
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
+#define I915_RESET_MODESET 2
#define I915_WEDGED (BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
+
+ /** Number of times an engine has been reset */
+ u32 reset_engine_count[I915_NUM_ENGINES];
/**
* Waitqueue to signal when a hang is detected. Used for waiters
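A minimal sketch of how a per-engine bit might be claimed under this scheme; the names are assumptions, but the arithmetic follows the defines above:

	/* Engine bits occupy the most significant bits just below I915_WEDGED. */
	#define ENGINE_RESET_BIT(id)	(I915_RESET_ENGINE + (id))

	static bool claim_engine_reset(struct i915_gpu_error *error,
				       unsigned int engine_id)
	{
		/* Fails if a reset of this engine is already in flight. */
		return !test_and_set_bit(ENGINE_RESET_BIT(engine_id),
					 &error->flags);
	}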
@@ -1869,6 +1906,7 @@ struct i915_workarounds {
struct i915_virtual_gpu {
bool active;
+ u32 caps;
};
/* used in computing the new watermarks state */
@@ -1888,6 +1926,24 @@ struct i915_oa_reg {
u32 value;
};
+struct i915_oa_config {
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ atomic_t ref_count;
+};
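The struct is reference counted through the bare atomic_t above. A sketch of the matching get/put pair; the helper names and the free routine are assumptions:

	static struct i915_oa_config *get_oa_config(struct i915_oa_config *oa_config)
	{
		atomic_inc(&oa_config->ref_count);
		return oa_config;
	}

	static void put_oa_config(struct i915_oa_config *oa_config)
	{
		/* Dropping the last reference frees the register lists. */
		if (atomic_dec_and_test(&oa_config->ref_count))
			i915_oa_config_free(oa_config); /* hypothetical helper */
	}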
+
struct i915_perf_stream;
/**
@@ -2000,6 +2056,11 @@ struct i915_perf_stream {
* type of configured stream.
*/
const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
};
/**
@@ -2007,6 +2068,25 @@ struct i915_perf_stream {
*/
struct i915_oa_ops {
/**
+ * @is_valid_b_counter_reg: Validates a register's address for
+ * programming boolean counters on a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
+ u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates a register's address for programming
+ * the mux on a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates a register's address for programming
+ * flex EU filtering on a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+ /**
* @init_oa_buffer: Resets the head and tail pointers of the
* circular buffer for periodic OA reports.
*
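The three is_valid_*_reg hooks let each platform whitelist the register ranges userspace may program. A sketch of a range-based mux validator; the address bounds are invented for illustration:

	static bool reg_in_range(u32 addr, u32 start, u32 end)
	{
		return addr >= start && addr <= end;
	}

	static bool sketch_is_valid_mux_reg(struct drm_i915_private *dev_priv,
					    u32 addr)
	{
		/* Real platforms whitelist the MUX ranges for their gen;
		 * these bounds are placeholders. */
		return reg_in_range(addr, 0x9800, 0x9888) ||
		       reg_in_range(addr, 0x0d00, 0x0d2c);
	}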
@@ -2024,20 +2104,13 @@ struct i915_oa_ops {
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
/**
- * @select_metric_set: The auto generated code that checks whether a
- * requested OA config is applicable to the system and if so sets up
- * the mux, oa and flex eu register config pointers according to the
- * current dev_priv->perf.oa.metrics_set.
- */
- int (*select_metric_set)(struct drm_i915_private *dev_priv);
-
- /**
* @enable_metric_set: Applies the given OA config's MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+ int (*enable_metric_set)(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config);
/**
* @disable_metric_set: Remove system constraints associated with using
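With select_metric_set() removed, the caller resolves the configuration up front and passes it down. A sketch of the new call shape, assuming the stream layout declared earlier (error handling trimmed):

	/* The stream carries its own OA config instead of relying on a
	 * global dev_priv->perf.oa.metrics_set selection. */
	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
		return ret;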
@@ -2083,6 +2156,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
+ struct kmem_cache *luts;
struct kmem_cache *requests;
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
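The new luts slab backs per-context lookup entries. A sketch of how such a cache is typically created and used; the i915_lut_handle object type is an assumption here:

	struct i915_lut_handle *lut;

	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);	/* at init */
	if (!dev_priv->luts)
		return -ENOMEM;

	/* Later, allocations pair with kmem_cache_free(dev_priv->luts, lut). */
	lut = kmem_cache_alloc(dev_priv->luts, GFP_KERNEL);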
@@ -2133,9 +2207,6 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
- /* protects the mmio flip data */
- spinlock_t mmio_flip_lock;
-
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -2236,18 +2307,10 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- /* The hw wants to have a stable context identifier for the lifetime
- * of the context (for OA, PASID, faults, etc). This is limited
- * in execlists to 21 bits.
- */
- struct ida context_hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
- wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
@@ -2303,11 +2366,9 @@ struct drm_i915_private {
struct drm_i915_gem_object *vlv_pctx;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
/* fbdev registered on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
-#endif
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -2321,7 +2382,18 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
- struct list_head context_list;
+ struct {
+ struct list_head list;